dirstate-item: allow mtime to be None in "parentdata"...

Author: marmoute
Changeset: r49201:3d6eb119 (branch: default)
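Summary of the change: the third element of "parentfiledata" may now be None instead of a (seconds, nanoseconds) tuple. A None mtime is treated as "no usable mtime", so the item keeps its mode and size but records no cached mtime and behaves as "possibly dirty" for the next status check. The sketch below is an illustration only, not part of the changeset; the C-extension import path (mercurial.cext.parsers) is an assumption, and pure-Python builds expose the same class as mercurial.dirstatemap.DirstateItem.

# Hedged illustration of the "parentfiledata" shapes accepted after this
# change; not part of the changeset itself.
from mercurial.cext.parsers import DirstateItem  # assumed import path

# mtime known: (mode, size, (mtime_seconds, mtime_nanoseconds))
item_with_mtime = DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(0o644, 12, (1633000000, 0)),
)

# mtime unknown: (mode, size, None); before this change the third element
# had to be a tuple.  The item keeps mode/size but stores no mtime.
item_without_mtime = DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(0o644, 12, None),
)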
parsers.c
@@ -1,1337 +1,1355 @@
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define PY_SSIZE_T_CLEAN
11 11 #include <Python.h>
12 12 #include <ctype.h>
13 13 #include <stddef.h>
14 14 #include <string.h>
15 15
16 16 #include "bitmanipulation.h"
17 17 #include "charencode.h"
18 18 #include "util.h"
19 19
20 20 #ifdef IS_PY3K
21 21 /* The mapping of Python types is meant to be temporary to get Python
22 22 * 3 to compile. We should remove this once Python 3 support is fully
23 23 * supported and proper types are used in the extensions themselves. */
24 24 #define PyInt_Check PyLong_Check
25 25 #define PyInt_FromLong PyLong_FromLong
26 26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 27 #define PyInt_AsLong PyLong_AsLong
28 28 #else
29 29 /* Windows on Python 2.7 doesn't define S_IFLNK. Python 3+ defines via
30 30 * pyport.h. */
31 31 #ifndef S_IFLNK
32 32 #define S_IFLNK 0120000
33 33 #endif
34 34 #endif
35 35
36 36 static const char *const versionerrortext = "Python minor version mismatch";
37 37
38 38 static const int dirstate_v1_from_p2 = -2;
39 39 static const int dirstate_v1_nonnormal = -1;
40 40 static const int ambiguous_time = -1;
41 41
42 42 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
43 43 {
44 44 Py_ssize_t expected_size;
45 45
46 46 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
47 47 return NULL;
48 48 }
49 49
50 50 return _dict_new_presized(expected_size);
51 51 }
52 52
53 53 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
54 54 PyObject *kwds)
55 55 {
56 56 /* We do all the initialization here and not a tp_init function because
57 57 * dirstate_item is immutable. */
58 58 dirstateItemObject *t;
59 59 int wc_tracked;
60 60 int p1_tracked;
61 61 int p2_info;
62 62 int has_meaningful_data;
63 63 int has_meaningful_mtime;
64 64 int mode;
65 65 int size;
66 66 int mtime_s;
67 67 int mtime_ns;
68 68 PyObject *parentfiledata;
69 PyObject *mtime;
69 70 PyObject *fallback_exec;
70 71 PyObject *fallback_symlink;
71 72 static char *keywords_name[] = {
72 73 "wc_tracked", "p1_tracked", "p2_info",
73 74 "has_meaningful_data", "has_meaningful_mtime", "parentfiledata",
74 75 "fallback_exec", "fallback_symlink", NULL,
75 76 };
76 77 wc_tracked = 0;
77 78 p1_tracked = 0;
78 79 p2_info = 0;
79 80 has_meaningful_mtime = 1;
80 81 has_meaningful_data = 1;
81 82 parentfiledata = Py_None;
82 83 fallback_exec = Py_None;
83 84 fallback_symlink = Py_None;
84 85 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name,
85 86 &wc_tracked, &p1_tracked, &p2_info,
86 87 &has_meaningful_data,
87 88 &has_meaningful_mtime, &parentfiledata,
88 89 &fallback_exec, &fallback_symlink)) {
89 90 return NULL;
90 91 }
91 92 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
92 93 if (!t) {
93 94 return NULL;
94 95 }
95 96
96 97 t->flags = 0;
97 98 if (wc_tracked) {
98 99 t->flags |= dirstate_flag_wc_tracked;
99 100 }
100 101 if (p1_tracked) {
101 102 t->flags |= dirstate_flag_p1_tracked;
102 103 }
103 104 if (p2_info) {
104 105 t->flags |= dirstate_flag_p2_info;
105 106 }
106 107
107 108 if (fallback_exec != Py_None) {
108 109 t->flags |= dirstate_flag_has_fallback_exec;
109 110 if (PyObject_IsTrue(fallback_exec)) {
110 111 t->flags |= dirstate_flag_fallback_exec;
111 112 }
112 113 }
113 114 if (fallback_symlink != Py_None) {
114 115 t->flags |= dirstate_flag_has_fallback_symlink;
115 116 if (PyObject_IsTrue(fallback_symlink)) {
116 117 t->flags |= dirstate_flag_fallback_symlink;
117 118 }
118 119 }
119 120
120 121 if (parentfiledata != Py_None) {
121 if (!PyArg_ParseTuple(parentfiledata, "ii(ii)", &mode, &size,
122 &mtime_s, &mtime_ns)) {
122 if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size,
123 &mtime)) {
123 124 return NULL;
124 125 }
126 if (mtime != Py_None) {
127 if (!PyArg_ParseTuple(mtime, "ii", &mtime_s,
128 &mtime_ns)) {
129 return NULL;
130 }
131 } else {
132 has_meaningful_mtime = 0;
133 }
125 134 } else {
126 135 has_meaningful_data = 0;
127 136 has_meaningful_mtime = 0;
128 137 }
129 138 if (has_meaningful_data) {
130 139 t->flags |= dirstate_flag_has_meaningful_data;
131 140 t->mode = mode;
132 141 t->size = size;
133 142 } else {
134 143 t->mode = 0;
135 144 t->size = 0;
136 145 }
137 146 if (has_meaningful_mtime) {
138 147 t->flags |= dirstate_flag_has_mtime;
139 148 t->mtime_s = mtime_s;
140 149 t->mtime_ns = mtime_ns;
141 150 } else {
142 151 t->mtime_s = 0;
143 152 t->mtime_ns = 0;
144 153 }
145 154 return (PyObject *)t;
146 155 }
147 156
148 157 static void dirstate_item_dealloc(PyObject *o)
149 158 {
150 159 PyObject_Del(o);
151 160 }
152 161
153 162 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
154 163 {
155 164 return (self->flags & dirstate_flag_wc_tracked);
156 165 }
157 166
158 167 static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self)
159 168 {
160 169 const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
161 170 dirstate_flag_p2_info;
162 171 return (self->flags & mask);
163 172 }
164 173
165 174 static inline bool dirstate_item_c_added(dirstateItemObject *self)
166 175 {
167 176 const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
168 177 dirstate_flag_p2_info);
169 178 const int target = dirstate_flag_wc_tracked;
170 179 return (self->flags & mask) == target;
171 180 }
172 181
173 182 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
174 183 {
175 184 if (self->flags & dirstate_flag_wc_tracked) {
176 185 return false;
177 186 }
178 187 return (self->flags &
179 188 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
180 189 }
181 190
182 191 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
183 192 {
184 193 return ((self->flags & dirstate_flag_wc_tracked) &&
185 194 (self->flags & dirstate_flag_p1_tracked) &&
186 195 (self->flags & dirstate_flag_p2_info));
187 196 }
188 197
189 198 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
190 199 {
191 200 return ((self->flags & dirstate_flag_wc_tracked) &&
192 201 !(self->flags & dirstate_flag_p1_tracked) &&
193 202 (self->flags & dirstate_flag_p2_info));
194 203 }
195 204
196 205 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
197 206 {
198 207 if (dirstate_item_c_removed(self)) {
199 208 return 'r';
200 209 } else if (dirstate_item_c_merged(self)) {
201 210 return 'm';
202 211 } else if (dirstate_item_c_added(self)) {
203 212 return 'a';
204 213 } else {
205 214 return 'n';
206 215 }
207 216 }
208 217
209 218 static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self)
210 219 {
211 220 return (bool)self->flags & dirstate_flag_has_fallback_exec;
212 221 }
213 222
214 223 static inline bool
215 224 dirstate_item_c_has_fallback_symlink(dirstateItemObject *self)
216 225 {
217 226 return (bool)self->flags & dirstate_flag_has_fallback_symlink;
218 227 }
219 228
220 229 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
221 230 {
222 231 if (self->flags & dirstate_flag_has_meaningful_data) {
223 232 return self->mode;
224 233 } else {
225 234 return 0;
226 235 }
227 236 }
228 237
229 238 static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
230 239 {
231 240 if (!(self->flags & dirstate_flag_wc_tracked) &&
232 241 (self->flags & dirstate_flag_p2_info)) {
233 242 if (self->flags & dirstate_flag_p1_tracked) {
234 243 return dirstate_v1_nonnormal;
235 244 } else {
236 245 return dirstate_v1_from_p2;
237 246 }
238 247 } else if (dirstate_item_c_removed(self)) {
239 248 return 0;
240 249 } else if (self->flags & dirstate_flag_p2_info) {
241 250 return dirstate_v1_from_p2;
242 251 } else if (dirstate_item_c_added(self)) {
243 252 return dirstate_v1_nonnormal;
244 253 } else if (self->flags & dirstate_flag_has_meaningful_data) {
245 254 return self->size;
246 255 } else {
247 256 return dirstate_v1_nonnormal;
248 257 }
249 258 }
250 259
251 260 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
252 261 {
253 262 if (dirstate_item_c_removed(self)) {
254 263 return 0;
255 264 } else if (!(self->flags & dirstate_flag_has_mtime) ||
256 265 !(self->flags & dirstate_flag_p1_tracked) ||
257 266 !(self->flags & dirstate_flag_wc_tracked) ||
258 267 (self->flags & dirstate_flag_p2_info)) {
259 268 return ambiguous_time;
260 269 } else {
261 270 return self->mtime_s;
262 271 }
263 272 }
264 273
265 274 static PyObject *dirstate_item_v2_data(dirstateItemObject *self)
266 275 {
267 276 int flags = self->flags;
268 277 int mode = dirstate_item_c_v1_mode(self);
269 278 #ifdef S_IXUSR
270 279 /* This is for platforms with an exec bit */
271 280 if ((mode & S_IXUSR) != 0) {
272 281 flags |= dirstate_flag_mode_exec_perm;
273 282 } else {
274 283 flags &= ~dirstate_flag_mode_exec_perm;
275 284 }
276 285 #else
277 286 flags &= ~dirstate_flag_mode_exec_perm;
278 287 #endif
279 288 #ifdef S_ISLNK
280 289 /* This is for platforms with support for symlinks */
281 290 if (S_ISLNK(mode)) {
282 291 flags |= dirstate_flag_mode_is_symlink;
283 292 } else {
284 293 flags &= ~dirstate_flag_mode_is_symlink;
285 294 }
286 295 #else
287 296 flags &= ~dirstate_flag_mode_is_symlink;
288 297 #endif
289 298 return Py_BuildValue("iiii", flags, self->size, self->mtime_s,
290 299 self->mtime_ns);
291 300 };
292 301
293 302 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
294 303 {
295 304 char state = dirstate_item_c_v1_state(self);
296 305 return PyBytes_FromStringAndSize(&state, 1);
297 306 };
298 307
299 308 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
300 309 {
301 310 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
302 311 };
303 312
304 313 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
305 314 {
306 315 return PyInt_FromLong(dirstate_item_c_v1_size(self));
307 316 };
308 317
309 318 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
310 319 {
311 320 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
312 321 };
313 322
314 323 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
315 324 PyObject *now)
316 325 {
317 326 int now_s;
318 327 int now_ns;
319 328 if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) {
320 329 return NULL;
321 330 }
322 331 if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) {
323 332 Py_RETURN_TRUE;
324 333 } else {
325 334 Py_RETURN_FALSE;
326 335 }
327 336 };
328 337
329 338 static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
330 339 PyObject *other)
331 340 {
332 341 int other_s;
333 342 int other_ns;
334 343 if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) {
335 344 return NULL;
336 345 }
337 346 if ((self->flags & dirstate_flag_has_mtime) &&
338 347 self->mtime_s == other_s &&
339 348 (self->mtime_ns == other_ns || self->mtime_ns == 0 ||
340 349 other_ns == 0)) {
341 350 Py_RETURN_TRUE;
342 351 } else {
343 352 Py_RETURN_FALSE;
344 353 }
345 354 };
346 355
347 356 /* This will never change since it's bound to V1
348 357 */
349 358 static inline dirstateItemObject *
350 359 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
351 360 {
352 361 dirstateItemObject *t =
353 362 PyObject_New(dirstateItemObject, &dirstateItemType);
354 363 if (!t) {
355 364 return NULL;
356 365 }
357 366 t->flags = 0;
358 367 t->mode = 0;
359 368 t->size = 0;
360 369 t->mtime_s = 0;
361 370 t->mtime_ns = 0;
362 371
363 372 if (state == 'm') {
364 373 t->flags = (dirstate_flag_wc_tracked |
365 374 dirstate_flag_p1_tracked | dirstate_flag_p2_info);
366 375 } else if (state == 'a') {
367 376 t->flags = dirstate_flag_wc_tracked;
368 377 } else if (state == 'r') {
369 378 if (size == dirstate_v1_nonnormal) {
370 379 t->flags =
371 380 dirstate_flag_p1_tracked | dirstate_flag_p2_info;
372 381 } else if (size == dirstate_v1_from_p2) {
373 382 t->flags = dirstate_flag_p2_info;
374 383 } else {
375 384 t->flags = dirstate_flag_p1_tracked;
376 385 }
377 386 } else if (state == 'n') {
378 387 if (size == dirstate_v1_from_p2) {
379 388 t->flags =
380 389 dirstate_flag_wc_tracked | dirstate_flag_p2_info;
381 390 } else if (size == dirstate_v1_nonnormal) {
382 391 t->flags =
383 392 dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
384 393 } else if (mtime == ambiguous_time) {
385 394 t->flags = (dirstate_flag_wc_tracked |
386 395 dirstate_flag_p1_tracked |
387 396 dirstate_flag_has_meaningful_data);
388 397 t->mode = mode;
389 398 t->size = size;
390 399 } else {
391 400 t->flags = (dirstate_flag_wc_tracked |
392 401 dirstate_flag_p1_tracked |
393 402 dirstate_flag_has_meaningful_data |
394 403 dirstate_flag_has_mtime);
395 404 t->mode = mode;
396 405 t->size = size;
397 406 t->mtime_s = mtime;
398 407 }
399 408 } else {
400 409 PyErr_Format(PyExc_RuntimeError,
401 410 "unknown state: `%c` (%d, %d, %d)", state, mode,
402 411 size, mtime, NULL);
403 412 Py_DECREF(t);
404 413 return NULL;
405 414 }
406 415
407 416 return t;
408 417 }
409 418
410 419 /* This will never change since it's bound to V1, unlike `dirstate_item_new` */
411 420 static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
412 421 PyObject *args)
413 422 {
414 423 /* We do all the initialization here and not a tp_init function because
415 424 * dirstate_item is immutable. */
416 425 char state;
417 426 int size, mode, mtime;
418 427 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
419 428 return NULL;
420 429 }
421 430 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
422 431 };
423 432
424 433 static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype,
425 434 PyObject *args)
426 435 {
427 436 dirstateItemObject *t =
428 437 PyObject_New(dirstateItemObject, &dirstateItemType);
429 438 if (!t) {
430 439 return NULL;
431 440 }
432 441 if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s,
433 442 &t->mtime_ns)) {
434 443 return NULL;
435 444 }
436 445 if (t->flags & dirstate_flag_expected_state_is_modified) {
437 446 t->flags &= ~(dirstate_flag_expected_state_is_modified |
438 447 dirstate_flag_has_meaningful_data |
439 448 dirstate_flag_has_mtime);
440 449 }
441 450 if (t->flags & dirstate_flag_mtime_second_ambiguous) {
442 451 /* The current code is not able to do the more subtle comparison
443 452 * that the MTIME_SECOND_AMBIGUOUS requires. So we ignore the
444 453 * mtime */
445 454 t->flags &= ~(dirstate_flag_mtime_second_ambiguous |
446 455 dirstate_flag_has_meaningful_data |
447 456 dirstate_flag_has_mtime);
448 457 }
449 458 t->mode = 0;
450 459 if (t->flags & dirstate_flag_has_meaningful_data) {
451 460 if (t->flags & dirstate_flag_mode_exec_perm) {
452 461 t->mode = 0755;
453 462 } else {
454 463 t->mode = 0644;
455 464 }
456 465 if (t->flags & dirstate_flag_mode_is_symlink) {
457 466 t->mode |= S_IFLNK;
458 467 } else {
459 468 t->mode |= S_IFREG;
460 469 }
461 470 }
462 471 return (PyObject *)t;
463 472 };
464 473
465 474 /* This means the next status call will have to actually check its content
466 475 to make sure it is correct. */
467 476 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
468 477 {
469 478 self->flags &= ~dirstate_flag_has_mtime;
470 479 Py_RETURN_NONE;
471 480 }
472 481
473 482 /* See docstring of the python implementation for details */
474 483 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
475 484 PyObject *args)
476 485 {
477 486 int size, mode, mtime_s, mtime_ns;
478 if (!PyArg_ParseTuple(args, "ii(ii)", &mode, &size, &mtime_s,
479 &mtime_ns)) {
487 PyObject *mtime;
488 mtime_s = 0;
489 mtime_ns = 0;
490 if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) {
480 491 return NULL;
481 492 }
493 if (mtime != Py_None) {
494 if (!PyArg_ParseTuple(mtime, "ii", &mtime_s, &mtime_ns)) {
495 return NULL;
496 }
497 } else {
498 self->flags &= ~dirstate_flag_has_mtime;
499 }
482 500 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
483 501 dirstate_flag_has_meaningful_data |
484 502 dirstate_flag_has_mtime;
485 503 self->mode = mode;
486 504 self->size = size;
487 505 self->mtime_s = mtime_s;
488 506 self->mtime_ns = mtime_ns;
489 507 Py_RETURN_NONE;
490 508 }
491 509
492 510 static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
493 511 {
494 512 self->flags |= dirstate_flag_wc_tracked;
495 513 self->flags &= ~dirstate_flag_has_mtime;
496 514 Py_RETURN_NONE;
497 515 }
498 516
499 517 static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
500 518 {
501 519 self->flags &= ~dirstate_flag_wc_tracked;
502 520 self->mode = 0;
503 521 self->size = 0;
504 522 self->mtime_s = 0;
505 523 self->mtime_ns = 0;
506 524 Py_RETURN_NONE;
507 525 }
508 526
509 527 static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
510 528 {
511 529 if (self->flags & dirstate_flag_p2_info) {
512 530 self->flags &= ~(dirstate_flag_p2_info |
513 531 dirstate_flag_has_meaningful_data |
514 532 dirstate_flag_has_mtime);
515 533 self->mode = 0;
516 534 self->size = 0;
517 535 self->mtime_s = 0;
518 536 self->mtime_ns = 0;
519 537 }
520 538 Py_RETURN_NONE;
521 539 }
522 540 static PyMethodDef dirstate_item_methods[] = {
523 541 {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS,
524 542 "return data suitable for v2 serialization"},
525 543 {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
526 544 "return a \"state\" suitable for v1 serialization"},
527 545 {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
528 546 "return a \"mode\" suitable for v1 serialization"},
529 547 {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
530 548 "return a \"size\" suitable for v1 serialization"},
531 549 {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
532 550 "return a \"mtime\" suitable for v1 serialization"},
533 551 {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
534 552 "True if the stored mtime would be ambiguous with the current time"},
535 553 {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
536 554 METH_O, "True if the stored mtime is likely equal to the given mtime"},
537 555 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
538 556 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
539 557 {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth,
540 558 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"},
541 559 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
542 560 METH_NOARGS, "mark a file as \"possibly dirty\""},
543 561 {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
544 562 "mark a file as \"clean\""},
545 563 {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
546 564 "mark a file as \"tracked\""},
547 565 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
548 566 "mark a file as \"untracked\""},
549 567 {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
550 568 "remove all \"merge-only\" from a DirstateItem"},
551 569 {NULL} /* Sentinel */
552 570 };
553 571
554 572 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
555 573 {
556 574 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
557 575 };
558 576
559 577 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
560 578 {
561 579 return PyInt_FromLong(dirstate_item_c_v1_size(self));
562 580 };
563 581
564 582 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
565 583 {
566 584 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
567 585 };
568 586
569 587 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
570 588 {
571 589 char state = dirstate_item_c_v1_state(self);
572 590 return PyBytes_FromStringAndSize(&state, 1);
573 591 };
574 592
575 593 static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self)
576 594 {
577 595 if (dirstate_item_c_has_fallback_exec(self)) {
578 596 Py_RETURN_TRUE;
579 597 } else {
580 598 Py_RETURN_FALSE;
581 599 }
582 600 };
583 601
584 602 static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self)
585 603 {
586 604 if (dirstate_item_c_has_fallback_exec(self)) {
587 605 if (self->flags & dirstate_flag_fallback_exec) {
588 606 Py_RETURN_TRUE;
589 607 } else {
590 608 Py_RETURN_FALSE;
591 609 }
592 610 } else {
593 611 Py_RETURN_NONE;
594 612 }
595 613 };
596 614
597 615 static int dirstate_item_set_fallback_exec(dirstateItemObject *self,
598 616 PyObject *value)
599 617 {
600 618 if ((value == Py_None) || (value == NULL)) {
601 619 self->flags &= ~dirstate_flag_has_fallback_exec;
602 620 } else {
603 621 self->flags |= dirstate_flag_has_fallback_exec;
604 622 if (PyObject_IsTrue(value)) {
605 623 self->flags |= dirstate_flag_fallback_exec;
606 624 } else {
607 625 self->flags &= ~dirstate_flag_fallback_exec;
608 626 }
609 627 }
610 628 return 0;
611 629 };
612 630
613 631 static PyObject *
614 632 dirstate_item_get_has_fallback_symlink(dirstateItemObject *self)
615 633 {
616 634 if (dirstate_item_c_has_fallback_symlink(self)) {
617 635 Py_RETURN_TRUE;
618 636 } else {
619 637 Py_RETURN_FALSE;
620 638 }
621 639 };
622 640
623 641 static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self)
624 642 {
625 643 if (dirstate_item_c_has_fallback_symlink(self)) {
626 644 if (self->flags & dirstate_flag_fallback_symlink) {
627 645 Py_RETURN_TRUE;
628 646 } else {
629 647 Py_RETURN_FALSE;
630 648 }
631 649 } else {
632 650 Py_RETURN_NONE;
633 651 }
634 652 };
635 653
636 654 static int dirstate_item_set_fallback_symlink(dirstateItemObject *self,
637 655 PyObject *value)
638 656 {
639 657 if ((value == Py_None) || (value == NULL)) {
640 658 self->flags &= ~dirstate_flag_has_fallback_symlink;
641 659 } else {
642 660 self->flags |= dirstate_flag_has_fallback_symlink;
643 661 if (PyObject_IsTrue(value)) {
644 662 self->flags |= dirstate_flag_fallback_symlink;
645 663 } else {
646 664 self->flags &= ~dirstate_flag_fallback_symlink;
647 665 }
648 666 }
649 667 return 0;
650 668 };
651 669
652 670 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
653 671 {
654 672 if (dirstate_item_c_tracked(self)) {
655 673 Py_RETURN_TRUE;
656 674 } else {
657 675 Py_RETURN_FALSE;
658 676 }
659 677 };
660 678 static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self)
661 679 {
662 680 if (self->flags & dirstate_flag_p1_tracked) {
663 681 Py_RETURN_TRUE;
664 682 } else {
665 683 Py_RETURN_FALSE;
666 684 }
667 685 };
668 686
669 687 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
670 688 {
671 689 if (dirstate_item_c_added(self)) {
672 690 Py_RETURN_TRUE;
673 691 } else {
674 692 Py_RETURN_FALSE;
675 693 }
676 694 };
677 695
678 696 static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self)
679 697 {
680 698 if (self->flags & dirstate_flag_wc_tracked &&
681 699 self->flags & dirstate_flag_p2_info) {
682 700 Py_RETURN_TRUE;
683 701 } else {
684 702 Py_RETURN_FALSE;
685 703 }
686 704 };
687 705
688 706 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
689 707 {
690 708 if (dirstate_item_c_merged(self)) {
691 709 Py_RETURN_TRUE;
692 710 } else {
693 711 Py_RETURN_FALSE;
694 712 }
695 713 };
696 714
697 715 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
698 716 {
699 717 if (dirstate_item_c_from_p2(self)) {
700 718 Py_RETURN_TRUE;
701 719 } else {
702 720 Py_RETURN_FALSE;
703 721 }
704 722 };
705 723
706 724 static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self)
707 725 {
708 726 if (!(self->flags & dirstate_flag_wc_tracked)) {
709 727 Py_RETURN_FALSE;
710 728 } else if (!(self->flags & dirstate_flag_p1_tracked)) {
711 729 Py_RETURN_FALSE;
712 730 } else if (self->flags & dirstate_flag_p2_info) {
713 731 Py_RETURN_FALSE;
714 732 } else {
715 733 Py_RETURN_TRUE;
716 734 }
717 735 };
718 736
719 737 static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self)
720 738 {
721 739 if (dirstate_item_c_any_tracked(self)) {
722 740 Py_RETURN_TRUE;
723 741 } else {
724 742 Py_RETURN_FALSE;
725 743 }
726 744 };
727 745
728 746 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
729 747 {
730 748 if (dirstate_item_c_removed(self)) {
731 749 Py_RETURN_TRUE;
732 750 } else {
733 751 Py_RETURN_FALSE;
734 752 }
735 753 };
736 754
737 755 static PyGetSetDef dirstate_item_getset[] = {
738 756 {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
739 757 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
740 758 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
741 759 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
742 760 {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL,
743 761 "has_fallback_exec", NULL},
744 762 {"fallback_exec", (getter)dirstate_item_get_fallback_exec,
745 763 (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL},
746 764 {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink,
747 765 NULL, "has_fallback_symlink", NULL},
748 766 {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink,
749 767 (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL},
750 768 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
751 769 {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked",
752 770 NULL},
753 771 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
754 772 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
755 773 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
756 774 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
757 775 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
758 776 NULL},
759 777 {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked",
760 778 NULL},
761 779 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
762 780 {NULL} /* Sentinel */
763 781 };
764 782
765 783 PyTypeObject dirstateItemType = {
766 784 PyVarObject_HEAD_INIT(NULL, 0) /* header */
767 785 "dirstate_tuple", /* tp_name */
768 786 sizeof(dirstateItemObject), /* tp_basicsize */
769 787 0, /* tp_itemsize */
770 788 (destructor)dirstate_item_dealloc, /* tp_dealloc */
771 789 0, /* tp_print */
772 790 0, /* tp_getattr */
773 791 0, /* tp_setattr */
774 792 0, /* tp_compare */
775 793 0, /* tp_repr */
776 794 0, /* tp_as_number */
777 795 0, /* tp_as_sequence */
778 796 0, /* tp_as_mapping */
779 797 0, /* tp_hash */
780 798 0, /* tp_call */
781 799 0, /* tp_str */
782 800 0, /* tp_getattro */
783 801 0, /* tp_setattro */
784 802 0, /* tp_as_buffer */
785 803 Py_TPFLAGS_DEFAULT, /* tp_flags */
786 804 "dirstate tuple", /* tp_doc */
787 805 0, /* tp_traverse */
788 806 0, /* tp_clear */
789 807 0, /* tp_richcompare */
790 808 0, /* tp_weaklistoffset */
791 809 0, /* tp_iter */
792 810 0, /* tp_iternext */
793 811 dirstate_item_methods, /* tp_methods */
794 812 0, /* tp_members */
795 813 dirstate_item_getset, /* tp_getset */
796 814 0, /* tp_base */
797 815 0, /* tp_dict */
798 816 0, /* tp_descr_get */
799 817 0, /* tp_descr_set */
800 818 0, /* tp_dictoffset */
801 819 0, /* tp_init */
802 820 0, /* tp_alloc */
803 821 dirstate_item_new, /* tp_new */
804 822 };
805 823
806 824 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
807 825 {
808 826 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
809 827 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
810 828 char state, *cur, *str, *cpos;
811 829 int mode, size, mtime;
812 830 unsigned int flen, pos = 40;
813 831 Py_ssize_t len = 40;
814 832 Py_ssize_t readlen;
815 833
816 834 if (!PyArg_ParseTuple(
817 835 args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
818 836 &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
819 837 goto quit;
820 838 }
821 839
822 840 len = readlen;
823 841
824 842 /* read parents */
825 843 if (len < 40) {
826 844 PyErr_SetString(PyExc_ValueError,
827 845 "too little data for parents");
828 846 goto quit;
829 847 }
830 848
831 849 parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
832 850 str + 20, (Py_ssize_t)20);
833 851 if (!parents) {
834 852 goto quit;
835 853 }
836 854
837 855 /* read filenames */
838 856 while (pos >= 40 && pos < len) {
839 857 if (pos + 17 > len) {
840 858 PyErr_SetString(PyExc_ValueError,
841 859 "overflow in dirstate");
842 860 goto quit;
843 861 }
844 862 cur = str + pos;
845 863 /* unpack header */
846 864 state = *cur;
847 865 mode = getbe32(cur + 1);
848 866 size = getbe32(cur + 5);
849 867 mtime = getbe32(cur + 9);
850 868 flen = getbe32(cur + 13);
851 869 pos += 17;
852 870 cur += 17;
853 871 if (flen > len - pos) {
854 872 PyErr_SetString(PyExc_ValueError,
855 873 "overflow in dirstate");
856 874 goto quit;
857 875 }
858 876
859 877 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
860 878 size, mtime);
861 879 if (!entry)
862 880 goto quit;
863 881 cpos = memchr(cur, 0, flen);
864 882 if (cpos) {
865 883 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
866 884 cname = PyBytes_FromStringAndSize(
867 885 cpos + 1, flen - (cpos - cur) - 1);
868 886 if (!fname || !cname ||
869 887 PyDict_SetItem(cmap, fname, cname) == -1 ||
870 888 PyDict_SetItem(dmap, fname, entry) == -1) {
871 889 goto quit;
872 890 }
873 891 Py_DECREF(cname);
874 892 } else {
875 893 fname = PyBytes_FromStringAndSize(cur, flen);
876 894 if (!fname ||
877 895 PyDict_SetItem(dmap, fname, entry) == -1) {
878 896 goto quit;
879 897 }
880 898 }
881 899 Py_DECREF(fname);
882 900 Py_DECREF(entry);
883 901 fname = cname = entry = NULL;
884 902 pos += flen;
885 903 }
886 904
887 905 ret = parents;
888 906 Py_INCREF(ret);
889 907 quit:
890 908 Py_XDECREF(fname);
891 909 Py_XDECREF(cname);
892 910 Py_XDECREF(entry);
893 911 Py_XDECREF(parents);
894 912 return ret;
895 913 }
896 914
897 915 /*
898 916 * Efficiently pack a dirstate object into its on-disk format.
899 917 */
900 918 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
901 919 {
902 920 PyObject *packobj = NULL;
903 921 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
904 922 Py_ssize_t nbytes, pos, l;
905 923 PyObject *k, *v = NULL, *pn;
906 924 char *p, *s;
907 925 int now_s;
908 926 int now_ns;
909 927
910 928 if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type,
911 929 &map, &PyDict_Type, &copymap, &PyTuple_Type, &pl,
912 930 &now_s, &now_ns)) {
913 931 return NULL;
914 932 }
915 933
916 934 if (PyTuple_Size(pl) != 2) {
917 935 PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
918 936 return NULL;
919 937 }
920 938
921 939 /* Figure out how much we need to allocate. */
922 940 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
923 941 PyObject *c;
924 942 if (!PyBytes_Check(k)) {
925 943 PyErr_SetString(PyExc_TypeError, "expected string key");
926 944 goto bail;
927 945 }
928 946 nbytes += PyBytes_GET_SIZE(k) + 17;
929 947 c = PyDict_GetItem(copymap, k);
930 948 if (c) {
931 949 if (!PyBytes_Check(c)) {
932 950 PyErr_SetString(PyExc_TypeError,
933 951 "expected string key");
934 952 goto bail;
935 953 }
936 954 nbytes += PyBytes_GET_SIZE(c) + 1;
937 955 }
938 956 }
939 957
940 958 packobj = PyBytes_FromStringAndSize(NULL, nbytes);
941 959 if (packobj == NULL) {
942 960 goto bail;
943 961 }
944 962
945 963 p = PyBytes_AS_STRING(packobj);
946 964
947 965 pn = PyTuple_GET_ITEM(pl, 0);
948 966 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
949 967 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
950 968 goto bail;
951 969 }
952 970 memcpy(p, s, l);
953 971 p += 20;
954 972 pn = PyTuple_GET_ITEM(pl, 1);
955 973 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
956 974 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
957 975 goto bail;
958 976 }
959 977 memcpy(p, s, l);
960 978 p += 20;
961 979
962 980 for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
963 981 dirstateItemObject *tuple;
964 982 char state;
965 983 int mode, size, mtime;
966 984 Py_ssize_t len, l;
967 985 PyObject *o;
968 986 char *t;
969 987
970 988 if (!dirstate_tuple_check(v)) {
971 989 PyErr_SetString(PyExc_TypeError,
972 990 "expected a dirstate tuple");
973 991 goto bail;
974 992 }
975 993 tuple = (dirstateItemObject *)v;
976 994
977 995 state = dirstate_item_c_v1_state(tuple);
978 996 mode = dirstate_item_c_v1_mode(tuple);
979 997 size = dirstate_item_c_v1_size(tuple);
980 998 mtime = dirstate_item_c_v1_mtime(tuple);
981 999 if (state == 'n' && tuple->mtime_s == now_s) {
982 1000 /* See pure/parsers.py:pack_dirstate for why we do
983 1001 * this. */
984 1002 mtime = -1;
985 1003 mtime_unset = (PyObject *)dirstate_item_from_v1_data(
986 1004 state, mode, size, mtime);
987 1005 if (!mtime_unset) {
988 1006 goto bail;
989 1007 }
990 1008 if (PyDict_SetItem(map, k, mtime_unset) == -1) {
991 1009 goto bail;
992 1010 }
993 1011 Py_DECREF(mtime_unset);
994 1012 mtime_unset = NULL;
995 1013 }
996 1014 *p++ = state;
997 1015 putbe32((uint32_t)mode, p);
998 1016 putbe32((uint32_t)size, p + 4);
999 1017 putbe32((uint32_t)mtime, p + 8);
1000 1018 t = p + 12;
1001 1019 p += 16;
1002 1020 len = PyBytes_GET_SIZE(k);
1003 1021 memcpy(p, PyBytes_AS_STRING(k), len);
1004 1022 p += len;
1005 1023 o = PyDict_GetItem(copymap, k);
1006 1024 if (o) {
1007 1025 *p++ = '\0';
1008 1026 l = PyBytes_GET_SIZE(o);
1009 1027 memcpy(p, PyBytes_AS_STRING(o), l);
1010 1028 p += l;
1011 1029 len += l + 1;
1012 1030 }
1013 1031 putbe32((uint32_t)len, t);
1014 1032 }
1015 1033
1016 1034 pos = p - PyBytes_AS_STRING(packobj);
1017 1035 if (pos != nbytes) {
1018 1036 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
1019 1037 (long)pos, (long)nbytes);
1020 1038 goto bail;
1021 1039 }
1022 1040
1023 1041 return packobj;
1024 1042 bail:
1025 1043 Py_XDECREF(mtime_unset);
1026 1044 Py_XDECREF(packobj);
1027 1045 Py_XDECREF(v);
1028 1046 return NULL;
1029 1047 }
1030 1048
1031 1049 #define BUMPED_FIX 1
1032 1050 #define USING_SHA_256 2
1033 1051 #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)
1034 1052
1035 1053 static PyObject *readshas(const char *source, unsigned char num,
1036 1054 Py_ssize_t hashwidth)
1037 1055 {
1038 1056 int i;
1039 1057 PyObject *list = PyTuple_New(num);
1040 1058 if (list == NULL) {
1041 1059 return NULL;
1042 1060 }
1043 1061 for (i = 0; i < num; i++) {
1044 1062 PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
1045 1063 if (hash == NULL) {
1046 1064 Py_DECREF(list);
1047 1065 return NULL;
1048 1066 }
1049 1067 PyTuple_SET_ITEM(list, i, hash);
1050 1068 source += hashwidth;
1051 1069 }
1052 1070 return list;
1053 1071 }
1054 1072
1055 1073 static PyObject *fm1readmarker(const char *databegin, const char *dataend,
1056 1074 uint32_t *msize)
1057 1075 {
1058 1076 const char *data = databegin;
1059 1077 const char *meta;
1060 1078
1061 1079 double mtime;
1062 1080 int16_t tz;
1063 1081 uint16_t flags;
1064 1082 unsigned char nsuccs, nparents, nmetadata;
1065 1083 Py_ssize_t hashwidth = 20;
1066 1084
1067 1085 PyObject *prec = NULL, *parents = NULL, *succs = NULL;
1068 1086 PyObject *metadata = NULL, *ret = NULL;
1069 1087 int i;
1070 1088
1071 1089 if (data + FM1_HEADER_SIZE > dataend) {
1072 1090 goto overflow;
1073 1091 }
1074 1092
1075 1093 *msize = getbe32(data);
1076 1094 data += 4;
1077 1095 mtime = getbefloat64(data);
1078 1096 data += 8;
1079 1097 tz = getbeint16(data);
1080 1098 data += 2;
1081 1099 flags = getbeuint16(data);
1082 1100 data += 2;
1083 1101
1084 1102 if (flags & USING_SHA_256) {
1085 1103 hashwidth = 32;
1086 1104 }
1087 1105
1088 1106 nsuccs = (unsigned char)(*data++);
1089 1107 nparents = (unsigned char)(*data++);
1090 1108 nmetadata = (unsigned char)(*data++);
1091 1109
1092 1110 if (databegin + *msize > dataend) {
1093 1111 goto overflow;
1094 1112 }
1095 1113 dataend = databegin + *msize; /* narrow down to marker size */
1096 1114
1097 1115 if (data + hashwidth > dataend) {
1098 1116 goto overflow;
1099 1117 }
1100 1118 prec = PyBytes_FromStringAndSize(data, hashwidth);
1101 1119 data += hashwidth;
1102 1120 if (prec == NULL) {
1103 1121 goto bail;
1104 1122 }
1105 1123
1106 1124 if (data + nsuccs * hashwidth > dataend) {
1107 1125 goto overflow;
1108 1126 }
1109 1127 succs = readshas(data, nsuccs, hashwidth);
1110 1128 if (succs == NULL) {
1111 1129 goto bail;
1112 1130 }
1113 1131 data += nsuccs * hashwidth;
1114 1132
1115 1133 if (nparents == 1 || nparents == 2) {
1116 1134 if (data + nparents * hashwidth > dataend) {
1117 1135 goto overflow;
1118 1136 }
1119 1137 parents = readshas(data, nparents, hashwidth);
1120 1138 if (parents == NULL) {
1121 1139 goto bail;
1122 1140 }
1123 1141 data += nparents * hashwidth;
1124 1142 } else {
1125 1143 parents = Py_None;
1126 1144 Py_INCREF(parents);
1127 1145 }
1128 1146
1129 1147 if (data + 2 * nmetadata > dataend) {
1130 1148 goto overflow;
1131 1149 }
1132 1150 meta = data + (2 * nmetadata);
1133 1151 metadata = PyTuple_New(nmetadata);
1134 1152 if (metadata == NULL) {
1135 1153 goto bail;
1136 1154 }
1137 1155 for (i = 0; i < nmetadata; i++) {
1138 1156 PyObject *tmp, *left = NULL, *right = NULL;
1139 1157 Py_ssize_t leftsize = (unsigned char)(*data++);
1140 1158 Py_ssize_t rightsize = (unsigned char)(*data++);
1141 1159 if (meta + leftsize + rightsize > dataend) {
1142 1160 goto overflow;
1143 1161 }
1144 1162 left = PyBytes_FromStringAndSize(meta, leftsize);
1145 1163 meta += leftsize;
1146 1164 right = PyBytes_FromStringAndSize(meta, rightsize);
1147 1165 meta += rightsize;
1148 1166 tmp = PyTuple_New(2);
1149 1167 if (!left || !right || !tmp) {
1150 1168 Py_XDECREF(left);
1151 1169 Py_XDECREF(right);
1152 1170 Py_XDECREF(tmp);
1153 1171 goto bail;
1154 1172 }
1155 1173 PyTuple_SET_ITEM(tmp, 0, left);
1156 1174 PyTuple_SET_ITEM(tmp, 1, right);
1157 1175 PyTuple_SET_ITEM(metadata, i, tmp);
1158 1176 }
1159 1177 ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
1160 1178 (int)tz * 60, parents);
1161 1179 goto bail; /* return successfully */
1162 1180
1163 1181 overflow:
1164 1182 PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
1165 1183 bail:
1166 1184 Py_XDECREF(prec);
1167 1185 Py_XDECREF(succs);
1168 1186 Py_XDECREF(metadata);
1169 1187 Py_XDECREF(parents);
1170 1188 return ret;
1171 1189 }
1172 1190
1173 1191 static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
1174 1192 {
1175 1193 const char *data, *dataend;
1176 1194 Py_ssize_t datalen, offset, stop;
1177 1195 PyObject *markers = NULL;
1178 1196
1179 1197 if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
1180 1198 &offset, &stop)) {
1181 1199 return NULL;
1182 1200 }
1183 1201 if (offset < 0) {
1184 1202 PyErr_SetString(PyExc_ValueError,
1185 1203 "invalid negative offset in fm1readmarkers");
1186 1204 return NULL;
1187 1205 }
1188 1206 if (stop > datalen) {
1189 1207 PyErr_SetString(
1190 1208 PyExc_ValueError,
1191 1209 "stop longer than data length in fm1readmarkers");
1192 1210 return NULL;
1193 1211 }
1194 1212 dataend = data + datalen;
1195 1213 data += offset;
1196 1214 markers = PyList_New(0);
1197 1215 if (!markers) {
1198 1216 return NULL;
1199 1217 }
1200 1218 while (offset < stop) {
1201 1219 uint32_t msize;
1202 1220 int error;
1203 1221 PyObject *record = fm1readmarker(data, dataend, &msize);
1204 1222 if (!record) {
1205 1223 goto bail;
1206 1224 }
1207 1225 error = PyList_Append(markers, record);
1208 1226 Py_DECREF(record);
1209 1227 if (error) {
1210 1228 goto bail;
1211 1229 }
1212 1230 data += msize;
1213 1231 offset += msize;
1214 1232 }
1215 1233 return markers;
1216 1234 bail:
1217 1235 Py_DECREF(markers);
1218 1236 return NULL;
1219 1237 }
1220 1238
1221 1239 static char parsers_doc[] = "Efficient content parsing.";
1222 1240
1223 1241 PyObject *encodedir(PyObject *self, PyObject *args);
1224 1242 PyObject *pathencode(PyObject *self, PyObject *args);
1225 1243 PyObject *lowerencode(PyObject *self, PyObject *args);
1226 1244 PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
1227 1245
1228 1246 static PyMethodDef methods[] = {
1229 1247 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
1230 1248 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
1231 1249 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
1232 1250 "parse a revlog index\n"},
1233 1251 {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
1234 1252 {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
1235 1253 {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
1236 1254 {"dict_new_presized", dict_new_presized, METH_VARARGS,
1237 1255 "construct a dict with an expected size\n"},
1238 1256 {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
1239 1257 "make file foldmap\n"},
1240 1258 {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
1241 1259 "escape a UTF-8 byte string to JSON (fast path)\n"},
1242 1260 {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
1243 1261 {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
1244 1262 {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
1245 1263 {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
1246 1264 "parse v1 obsolete markers\n"},
1247 1265 {NULL, NULL}};
1248 1266
1249 1267 void dirs_module_init(PyObject *mod);
1250 1268 void manifest_module_init(PyObject *mod);
1251 1269 void revlog_module_init(PyObject *mod);
1252 1270
1253 1271 static const int version = 20;
1254 1272
1255 1273 static void module_init(PyObject *mod)
1256 1274 {
1257 1275 PyModule_AddIntConstant(mod, "version", version);
1258 1276
1259 1277 /* This module constant has two purposes. First, it lets us unit test
1260 1278 * the ImportError raised without hard-coding any error text. This
1261 1279 * means we can change the text in the future without breaking tests,
1262 1280 * even across changesets without a recompile. Second, its presence
1263 1281 * can be used to determine whether the version-checking logic is
1264 1282 * present, which also helps in testing across changesets without a
1265 1283 * recompile. Note that this means the pure-Python version of parsers
1266 1284 * should not have this module constant. */
1267 1285 PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
1268 1286
1269 1287 dirs_module_init(mod);
1270 1288 manifest_module_init(mod);
1271 1289 revlog_module_init(mod);
1272 1290
1273 1291 if (PyType_Ready(&dirstateItemType) < 0) {
1274 1292 return;
1275 1293 }
1276 1294 Py_INCREF(&dirstateItemType);
1277 1295 PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
1278 1296 }
1279 1297
1280 1298 static int check_python_version(void)
1281 1299 {
1282 1300 PyObject *sys = PyImport_ImportModule("sys"), *ver;
1283 1301 long hexversion;
1284 1302 if (!sys) {
1285 1303 return -1;
1286 1304 }
1287 1305 ver = PyObject_GetAttrString(sys, "hexversion");
1288 1306 Py_DECREF(sys);
1289 1307 if (!ver) {
1290 1308 return -1;
1291 1309 }
1292 1310 hexversion = PyInt_AsLong(ver);
1293 1311 Py_DECREF(ver);
1294 1312 /* sys.hexversion is a 32-bit number by default, so the -1 case
1295 1313 * should only occur in unusual circumstances (e.g. if sys.hexversion
1296 1314 * is manually set to an invalid value). */
1297 1315 if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
1298 1316 PyErr_Format(PyExc_ImportError,
1299 1317 "%s: The Mercurial extension "
1300 1318 "modules were compiled with Python " PY_VERSION
1301 1319 ", but "
1302 1320 "Mercurial is currently using Python with "
1303 1321 "sys.hexversion=%ld: "
1304 1322 "Python %s\n at: %s",
1305 1323 versionerrortext, hexversion, Py_GetVersion(),
1306 1324 Py_GetProgramFullPath());
1307 1325 return -1;
1308 1326 }
1309 1327 return 0;
1310 1328 }
1311 1329
1312 1330 #ifdef IS_PY3K
1313 1331 static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
1314 1332 parsers_doc, -1, methods};
1315 1333
1316 1334 PyMODINIT_FUNC PyInit_parsers(void)
1317 1335 {
1318 1336 PyObject *mod;
1319 1337
1320 1338 if (check_python_version() == -1)
1321 1339 return NULL;
1322 1340 mod = PyModule_Create(&parsers_module);
1323 1341 module_init(mod);
1324 1342 return mod;
1325 1343 }
1326 1344 #else
1327 1345 PyMODINIT_FUNC initparsers(void)
1328 1346 {
1329 1347 PyObject *mod;
1330 1348
1331 1349 if (check_python_version() == -1) {
1332 1350 return;
1333 1351 }
1334 1352 mod = Py_InitModule3("parsers", methods, parsers_doc);
1335 1353 module_init(mod);
1336 1354 }
1337 1355 #endif
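The parsers.c hunk above adjusts two entry points, dirstate_item_new and dirstate_item_set_clean, to parse the mtime argument as a generic object and only unpack it as a (seconds, nanoseconds) pair when it is not None. A rough Python paraphrase of that parsing logic, for orientation only (the real implementation is the C code above):

def _parse_parentfiledata(parentfiledata):
    """Rough paraphrase of the C parsing added above; not the real code."""
    # C side: PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size, &mtime)
    mode, size, mtime = parentfiledata
    if mtime is not None:
        # C side: PyArg_ParseTuple(mtime, "ii", &mtime_s, &mtime_ns)
        mtime_s, mtime_ns = mtime
        return mode, size, mtime_s, mtime_ns, True  # has_meaningful_mtime
    # None means "no usable mtime": has_meaningful_mtime is cleared and the
    # stored mtime fields stay at zero.
    return mode, size, 0, 0, False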
dirstate.py
@@ -1,1533 +1,1534 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .dirstateutils import (
35 35 timestamp,
36 36 )
37 37
38 38 from .interfaces import (
39 39 dirstate as intdirstate,
40 40 util as interfaceutil,
41 41 )
42 42
43 43 parsers = policy.importmod('parsers')
44 44 rustmod = policy.importrust('dirstate')
45 45
46 46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47 47
48 48 propertycache = util.propertycache
49 49 filecache = scmutil.filecache
50 50 _rangemask = dirstatemap.rangemask
51 51
52 52 DirstateItem = dirstatemap.DirstateItem
53 53
54 54
55 55 class repocache(filecache):
56 56 """filecache for files in .hg/"""
57 57
58 58 def join(self, obj, fname):
59 59 return obj._opener.join(fname)
60 60
61 61
62 62 class rootcache(filecache):
63 63 """filecache for files in the repository root"""
64 64
65 65 def join(self, obj, fname):
66 66 return obj._join(fname)
67 67
68 68
69 69 def _getfsnow(vfs):
70 70 '''Get "now" timestamp on filesystem'''
71 71 tmpfd, tmpname = vfs.mkstemp()
72 72 try:
73 73 return timestamp.mtime_of(os.fstat(tmpfd))
74 74 finally:
75 75 os.close(tmpfd)
76 76 vfs.unlink(tmpname)
77 77
78 78
79 79 def requires_parents_change(func):
80 80 def wrap(self, *args, **kwargs):
81 81 if not self.pendingparentchange():
82 82 msg = 'calling `%s` outside of a parentchange context'
83 83 msg %= func.__name__
84 84 raise error.ProgrammingError(msg)
85 85 return func(self, *args, **kwargs)
86 86
87 87 return wrap
88 88
89 89
90 90 def requires_no_parents_change(func):
91 91 def wrap(self, *args, **kwargs):
92 92 if self.pendingparentchange():
93 93 msg = 'calling `%s` inside of a parentchange context'
94 94 msg %= func.__name__
95 95 raise error.ProgrammingError(msg)
96 96 return func(self, *args, **kwargs)
97 97
98 98 return wrap
99 99
100 100
101 101 @interfaceutil.implementer(intdirstate.idirstate)
102 102 class dirstate(object):
103 103 def __init__(
104 104 self,
105 105 opener,
106 106 ui,
107 107 root,
108 108 validate,
109 109 sparsematchfn,
110 110 nodeconstants,
111 111 use_dirstate_v2,
112 112 ):
113 113 """Create a new dirstate object.
114 114
115 115 opener is an open()-like callable that can be used to open the
116 116 dirstate file; root is the root of the directory tracked by
117 117 the dirstate.
118 118 """
119 119 self._use_dirstate_v2 = use_dirstate_v2
120 120 self._nodeconstants = nodeconstants
121 121 self._opener = opener
122 122 self._validate = validate
123 123 self._root = root
124 124 self._sparsematchfn = sparsematchfn
125 125 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
126 126 # UNC path pointing to root share (issue4557)
127 127 self._rootdir = pathutil.normasprefix(root)
128 128 self._dirty = False
129 129 self._lastnormaltime = timestamp.zero()
130 130 self._ui = ui
131 131 self._filecache = {}
132 132 self._parentwriters = 0
133 133 self._filename = b'dirstate'
134 134 self._pendingfilename = b'%s.pending' % self._filename
135 135 self._plchangecallbacks = {}
136 136 self._origpl = None
137 137 self._mapcls = dirstatemap.dirstatemap
138 138 # Access and cache cwd early, so we don't access it for the first time
139 139 # after a working-copy update caused it to not exist (accessing it then
140 140 # raises an exception).
141 141 self._cwd
142 142
143 143 def prefetch_parents(self):
144 144 """make sure the parents are loaded
145 145
146 146 Used to avoid a race condition.
147 147 """
148 148 self._pl
149 149
150 150 @contextlib.contextmanager
151 151 def parentchange(self):
152 152 """Context manager for handling dirstate parents.
153 153
154 154 If an exception occurs in the scope of the context manager,
155 155 the incoherent dirstate won't be written when wlock is
156 156 released.
157 157 """
158 158 self._parentwriters += 1
159 159 yield
160 160 # Typically we want the "undo" step of a context manager in a
161 161 # finally block so it happens even when an exception
162 162 # occurs. In this case, however, we only want to decrement
163 163 # parentwriters if the code in the with statement exits
164 164 # normally, so we don't have a try/finally here on purpose.
165 165 self._parentwriters -= 1
166 166
167 167 def pendingparentchange(self):
168 168 """Returns true if the dirstate is in the middle of a set of changes
169 169 that modify the dirstate parent.
170 170 """
171 171 return self._parentwriters > 0
172 172
173 173 @propertycache
174 174 def _map(self):
175 175 """Return the dirstate contents (see documentation for dirstatemap)."""
176 176 self._map = self._mapcls(
177 177 self._ui,
178 178 self._opener,
179 179 self._root,
180 180 self._nodeconstants,
181 181 self._use_dirstate_v2,
182 182 )
183 183 return self._map
184 184
185 185 @property
186 186 def _sparsematcher(self):
187 187 """The matcher for the sparse checkout.
188 188
189 189 The working directory may not include every file from a manifest. The
190 190 matcher obtained by this property will match a path if it is to be
191 191 included in the working directory.
192 192 """
193 193 # TODO there is potential to cache this property. For now, the matcher
194 194 # is resolved on every access. (But the called function does use a
195 195 # cache to keep the lookup fast.)
196 196 return self._sparsematchfn()
197 197
198 198 @repocache(b'branch')
199 199 def _branch(self):
200 200 try:
201 201 return self._opener.read(b"branch").strip() or b"default"
202 202 except IOError as inst:
203 203 if inst.errno != errno.ENOENT:
204 204 raise
205 205 return b"default"
206 206
207 207 @property
208 208 def _pl(self):
209 209 return self._map.parents()
210 210
211 211 def hasdir(self, d):
212 212 return self._map.hastrackeddir(d)
213 213
214 214 @rootcache(b'.hgignore')
215 215 def _ignore(self):
216 216 files = self._ignorefiles()
217 217 if not files:
218 218 return matchmod.never()
219 219
220 220 pats = [b'include:%s' % f for f in files]
221 221 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
222 222
223 223 @propertycache
224 224 def _slash(self):
225 225 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
226 226
227 227 @propertycache
228 228 def _checklink(self):
229 229 return util.checklink(self._root)
230 230
231 231 @propertycache
232 232 def _checkexec(self):
233 233 return bool(util.checkexec(self._root))
234 234
235 235 @propertycache
236 236 def _checkcase(self):
237 237 return not util.fscasesensitive(self._join(b'.hg'))
238 238
239 239 def _join(self, f):
240 240 # much faster than os.path.join()
241 241 # it's safe because f is always a relative path
242 242 return self._rootdir + f
243 243
244 244 def flagfunc(self, buildfallback):
245 245 """build a callable that returns flags associated with a filename
246 246
247 247 The information is extracted from three possible layers:
248 248 1. the file system if it supports the information
249 249 2. the "fallback" information stored in the dirstate if any
250 250 3. a more expensive mechanism inferring the flags from the parents.
251 251 """
252 252
253 253 # small hack to cache the result of buildfallback()
254 254 fallback_func = []
255 255
256 256 def get_flags(x):
257 257 entry = None
258 258 fallback_value = None
259 259 try:
260 260 st = os.lstat(self._join(x))
261 261 except OSError:
262 262 return b''
263 263
264 264 if self._checklink:
265 265 if util.statislink(st):
266 266 return b'l'
267 267 else:
268 268 entry = self.get_entry(x)
269 269 if entry.has_fallback_symlink:
270 270 if entry.fallback_symlink:
271 271 return b'l'
272 272 else:
273 273 if not fallback_func:
274 274 fallback_func.append(buildfallback())
275 275 fallback_value = fallback_func[0](x)
276 276 if b'l' in fallback_value:
277 277 return b'l'
278 278
279 279 if self._checkexec:
280 280 if util.statisexec(st):
281 281 return b'x'
282 282 else:
283 283 if entry is None:
284 284 entry = self.get_entry(x)
285 285 if entry.has_fallback_exec:
286 286 if entry.fallback_exec:
287 287 return b'x'
288 288 else:
289 289 if fallback_value is None:
290 290 if not fallback_func:
291 291 fallback_func.append(buildfallback())
292 292 fallback_value = fallback_func[0](x)
293 293 if b'x' in fallback_value:
294 294 return b'x'
295 295 return b''
296 296
297 297 return get_flags
298 298
299 299 @propertycache
300 300 def _cwd(self):
301 301 # internal config: ui.forcecwd
302 302 forcecwd = self._ui.config(b'ui', b'forcecwd')
303 303 if forcecwd:
304 304 return forcecwd
305 305 return encoding.getcwd()
306 306
307 307 def getcwd(self):
308 308 """Return the path from which a canonical path is calculated.
309 309
310 310 This path should be used to resolve file patterns or to convert
311 311 canonical paths back to file paths for display. It shouldn't be
312 312 used to get real file paths. Use vfs functions instead.
313 313 """
314 314 cwd = self._cwd
315 315 if cwd == self._root:
316 316 return b''
317 317 # self._root ends with a path separator if self._root is '/' or 'C:\'
318 318 rootsep = self._root
319 319 if not util.endswithsep(rootsep):
320 320 rootsep += pycompat.ossep
321 321 if cwd.startswith(rootsep):
322 322 return cwd[len(rootsep) :]
323 323 else:
324 324 # we're outside the repo. return an absolute path.
325 325 return cwd
326 326
327 327 def pathto(self, f, cwd=None):
328 328 if cwd is None:
329 329 cwd = self.getcwd()
330 330 path = util.pathto(self._root, cwd, f)
331 331 if self._slash:
332 332 return util.pconvert(path)
333 333 return path
334 334
335 335 def __getitem__(self, key):
336 336 """Return the current state of key (a filename) in the dirstate.
337 337
338 338 States are:
339 339 n normal
340 340 m needs merging
341 341 r marked for removal
342 342 a marked for addition
343 343 ? not tracked
344 344
345 345 XXX The "state" is a bit obscure to be in the "public" API. we should
346 346 consider migrating all user of this to going through the dirstate entry
347 347 instead.
348 348 """
349 349 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
350 350 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
351 351 entry = self._map.get(key)
352 352 if entry is not None:
353 353 return entry.state
354 354 return b'?'
355 355
356 356 def get_entry(self, path):
357 357 """return a DirstateItem for the associated path"""
358 358 entry = self._map.get(path)
359 359 if entry is None:
360 360 return DirstateItem()
361 361 return entry
362 362
363 363 def __contains__(self, key):
364 364 return key in self._map
365 365
366 366 def __iter__(self):
367 367 return iter(sorted(self._map))
368 368
369 369 def items(self):
370 370 return pycompat.iteritems(self._map)
371 371
372 372 iteritems = items
373 373
374 374 def parents(self):
375 375 return [self._validate(p) for p in self._pl]
376 376
377 377 def p1(self):
378 378 return self._validate(self._pl[0])
379 379
380 380 def p2(self):
381 381 return self._validate(self._pl[1])
382 382
383 383 @property
384 384 def in_merge(self):
385 385 """True if a merge is in progress"""
386 386 return self._pl[1] != self._nodeconstants.nullid
387 387
388 388 def branch(self):
389 389 return encoding.tolocal(self._branch)
390 390
391 391 def setparents(self, p1, p2=None):
392 392 """Set dirstate parents to p1 and p2.
393 393
394 394 When moving from two parents to one, "merged" entries are
395 395 adjusted to normal and previous copy records are discarded and
396 396 returned by the call.
397 397
398 398 See localrepo.setparents()
399 399 """
400 400 if p2 is None:
401 401 p2 = self._nodeconstants.nullid
402 402 if self._parentwriters == 0:
403 403 raise ValueError(
404 404 b"cannot set dirstate parent outside of "
405 405 b"dirstate.parentchange context manager"
406 406 )
407 407
408 408 self._dirty = True
409 409 oldp2 = self._pl[1]
410 410 if self._origpl is None:
411 411 self._origpl = self._pl
412 412 nullid = self._nodeconstants.nullid
413 413 # True if we need to fold p2 related state back to a linear case
414 414 fold_p2 = oldp2 != nullid and p2 == nullid
415 415 return self._map.setparents(p1, p2, fold_p2=fold_p2)
416 416
417 417 def setbranch(self, branch):
418 418 self.__class__._branch.set(self, encoding.fromlocal(branch))
419 419 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
420 420 try:
421 421 f.write(self._branch + b'\n')
422 422 f.close()
423 423
424 424 # make sure filecache has the correct stat info for _branch after
425 425 # replacing the underlying file
426 426 ce = self._filecache[b'_branch']
427 427 if ce:
428 428 ce.refresh()
429 429 except: # re-raises
430 430 f.discard()
431 431 raise
432 432
433 433 def invalidate(self):
434 434 """Causes the next access to reread the dirstate.
435 435
436 436 This is different from localrepo.invalidatedirstate() because it always
437 437 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
438 438 check whether the dirstate has changed before rereading it."""
439 439
440 440 for a in ("_map", "_branch", "_ignore"):
441 441 if a in self.__dict__:
442 442 delattr(self, a)
443 443 self._lastnormaltime = timestamp.zero()
444 444 self._dirty = False
445 445 self._parentwriters = 0
446 446 self._origpl = None
447 447
448 448 def copy(self, source, dest):
449 449 """Mark dest as a copy of source. Unmark dest if source is None."""
450 450 if source == dest:
451 451 return
452 452 self._dirty = True
453 453 if source is not None:
454 454 self._map.copymap[dest] = source
455 455 else:
456 456 self._map.copymap.pop(dest, None)
457 457
458 458 def copied(self, file):
459 459 return self._map.copymap.get(file, None)
460 460
461 461 def copies(self):
462 462 return self._map.copymap
463 463
464 464 @requires_no_parents_change
465 465 def set_tracked(self, filename):
466 466 """a "public" method for generic code to mark a file as tracked
467 467
468 468 This function is to be called outside of "update/merge" case. For
469 469 example by a command like `hg add X`.
470 470
471 471 return True if the file was previously untracked, False otherwise.
472 472 """
473 473 self._dirty = True
474 474 entry = self._map.get(filename)
475 475 if entry is None or not entry.tracked:
476 476 self._check_new_tracked_filename(filename)
477 477 return self._map.set_tracked(filename)
478 478
479 479 @requires_no_parents_change
480 480 def set_untracked(self, filename):
481 481 """a "public" method for generic code to mark a file as untracked
482 482
483 483 This function is to be called outside of "update/merge" case. For
484 484 example by a command like `hg remove X`.
485 485
486 486 return True if the file was previously tracked, False otherwise.
487 487 """
488 488 ret = self._map.set_untracked(filename)
489 489 if ret:
490 490 self._dirty = True
491 491 return ret
492 492
493 493 @requires_no_parents_change
494 494 def set_clean(self, filename, parentfiledata=None):
495 495 """record that the current state of the file on disk is known to be clean"""
496 496 self._dirty = True
497 497 if parentfiledata:
498 498 (mode, size, mtime) = parentfiledata
499 499 else:
500 500 (mode, size, mtime) = self._get_filedata(filename)
501 501 if not self._map[filename].tracked:
502 502 self._check_new_tracked_filename(filename)
503 503 self._map.set_clean(filename, mode, size, mtime)
504 504 if mtime > self._lastnormaltime:
505 505 # Remember the most recent modification timeslot for status(),
506 506 # to make sure we won't miss future size-preserving file content
507 507 # modifications that happen within the same timeslot.
508 508 self._lastnormaltime = mtime
509 509
510 510 @requires_no_parents_change
511 511 def set_possibly_dirty(self, filename):
512 512 """record that the current state of the file on disk is unknown"""
513 513 self._dirty = True
514 514 self._map.set_possibly_dirty(filename)
515 515
516 516 @requires_parents_change
517 517 def update_file_p1(
518 518 self,
519 519 filename,
520 520 p1_tracked,
521 521 ):
522 522 """Set a file as tracked in the parent (or not)
523 523
524 524 This is to be called when adjusting the dirstate to a new parent after a
525 525 history rewriting operation.
526 526
527 527 It should not be called during a merge (p2 != nullid) and only within
528 528 a `with dirstate.parentchange():` context.
529 529 """
530 530 if self.in_merge:
531 531 msg = b'update_file_reference should not be called when merging'
532 532 raise error.ProgrammingError(msg)
533 533 entry = self._map.get(filename)
534 534 if entry is None:
535 535 wc_tracked = False
536 536 else:
537 537 wc_tracked = entry.tracked
538 538 if not (p1_tracked or wc_tracked):
539 539 # the file is no longer relevant to anyone
540 540 if self._map.get(filename) is not None:
541 541 self._map.reset_state(filename)
542 542 self._dirty = True
543 543 elif (not p1_tracked) and wc_tracked:
544 544 if entry is not None and entry.added:
545 545 return # avoid dropping copy information (maybe?)
546 546
547 547 parentfiledata = None
548 548 if wc_tracked and p1_tracked:
549 549 parentfiledata = self._get_filedata(filename)
550 550
551 551 self._map.reset_state(
552 552 filename,
553 553 wc_tracked,
554 554 p1_tracked,
555 555 # the underlying reference might have changed, we will have to
556 556 # check it.
557 557 has_meaningful_mtime=False,
558 558 parentfiledata=parentfiledata,
559 559 )
560 560 if (
561 561 parentfiledata is not None
562 562 and parentfiledata[2] > self._lastnormaltime
563 563 ):
564 564 # Remember the most recent modification timeslot for status(),
565 565 # to make sure we won't miss future size-preserving file content
566 566 # modifications that happen within the same timeslot.
567 567 self._lastnormaltime = parentfiledata[2]
568 568
569 569 @requires_parents_change
570 570 def update_file(
571 571 self,
572 572 filename,
573 573 wc_tracked,
574 574 p1_tracked,
575 575 p2_info=False,
576 576 possibly_dirty=False,
577 577 parentfiledata=None,
578 578 ):
579 579 """update the information about a file in the dirstate
580 580
581 581 This is to be called when the dirstate's parent changes, to keep track
582 582 of the file's situation in regard to the working copy and its parent.
583 583
584 584 This function must be called within a `dirstate.parentchange` context.
585 585
586 586 note: the API is at an early stage and we might need to adjust it
587 587 depending on what information ends up being relevant and useful to
588 588 other processing.
589 589 """
590 590
591 591 # note: I do not think we need to double check name clash here since we
592 592 # are in an update/merge case that should already have taken care of
593 593 # this. The test agrees
594 594
595 595 self._dirty = True
596 596
597 597 need_parent_file_data = (
598 598 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
599 599 )
600 600
601 601 if need_parent_file_data and parentfiledata is None:
602 602 parentfiledata = self._get_filedata(filename)
603 603
604 604 self._map.reset_state(
605 605 filename,
606 606 wc_tracked,
607 607 p1_tracked,
608 608 p2_info=p2_info,
609 609 has_meaningful_mtime=not possibly_dirty,
610 610 parentfiledata=parentfiledata,
611 611 )
612 612 if (
613 613 parentfiledata is not None
614 and parentfiledata[2] is not None
614 615 and parentfiledata[2] > self._lastnormaltime
615 616 ):
616 617 # Remember the most recent modification timeslot for status(),
617 618 # to make sure we won't miss future size-preserving file content
618 619 # modifications that happen within the same timeslot.
619 620 self._lastnormaltime = parentfiledata[2]
620 621
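# Editorial note (not part of the changeset): with the `is not None` guard
# above, callers may now pass a parentfiledata whose mtime slot is None,
# meaning "mode and size are known but no reliable mtime could be cached".
# Hedged sketch, values assumed:
#
#     dirstate.update_file(
#         b'some/file',
#         wc_tracked=True,
#         p1_tracked=True,
#         parentfiledata=(mode, size, None),  # mtime unknown -> needs lookup
#     )
#
# In that case the _lastnormaltime bookkeeping is simply skipped.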
621 622 def _check_new_tracked_filename(self, filename):
622 623 scmutil.checkfilename(filename)
623 624 if self._map.hastrackeddir(filename):
624 625 msg = _(b'directory %r already in dirstate')
625 626 msg %= pycompat.bytestr(filename)
626 627 raise error.Abort(msg)
627 628 # shadows
628 629 for d in pathutil.finddirs(filename):
629 630 if self._map.hastrackeddir(d):
630 631 break
631 632 entry = self._map.get(d)
632 633 if entry is not None and not entry.removed:
633 634 msg = _(b'file %r in dirstate clashes with %r')
634 635 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
635 636 raise error.Abort(msg)
636 637
637 638 def _get_filedata(self, filename):
638 639 """return the (mode, size, mtime) stat data read from disk for filename"""
639 640 s = os.lstat(self._join(filename))
640 641 mode = s.st_mode
641 642 size = s.st_size
642 643 mtime = timestamp.mtime_of(s)
643 644 return (mode, size, mtime)
644 645
645 646 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
646 647 if exists is None:
647 648 exists = os.path.lexists(os.path.join(self._root, path))
648 649 if not exists:
649 650 # Maybe a path component exists
650 651 if not ignoremissing and b'/' in path:
651 652 d, f = path.rsplit(b'/', 1)
652 653 d = self._normalize(d, False, ignoremissing, None)
653 654 folded = d + b"/" + f
654 655 else:
655 656 # No path components, preserve original case
656 657 folded = path
657 658 else:
658 659 # recursively normalize leading directory components
659 660 # against dirstate
660 661 if b'/' in normed:
661 662 d, f = normed.rsplit(b'/', 1)
662 663 d = self._normalize(d, False, ignoremissing, True)
663 664 r = self._root + b"/" + d
664 665 folded = d + b"/" + util.fspath(f, r)
665 666 else:
666 667 folded = util.fspath(normed, self._root)
667 668 storemap[normed] = folded
668 669
669 670 return folded
670 671
671 672 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
672 673 normed = util.normcase(path)
673 674 folded = self._map.filefoldmap.get(normed, None)
674 675 if folded is None:
675 676 if isknown:
676 677 folded = path
677 678 else:
678 679 folded = self._discoverpath(
679 680 path, normed, ignoremissing, exists, self._map.filefoldmap
680 681 )
681 682 return folded
682 683
683 684 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
684 685 normed = util.normcase(path)
685 686 folded = self._map.filefoldmap.get(normed, None)
686 687 if folded is None:
687 688 folded = self._map.dirfoldmap.get(normed, None)
688 689 if folded is None:
689 690 if isknown:
690 691 folded = path
691 692 else:
692 693 # store discovered result in dirfoldmap so that future
693 694 # normalizefile calls don't start matching directories
694 695 folded = self._discoverpath(
695 696 path, normed, ignoremissing, exists, self._map.dirfoldmap
696 697 )
697 698 return folded
698 699
699 700 def normalize(self, path, isknown=False, ignoremissing=False):
700 701 """
701 702 normalize the case of a pathname when on a casefolding filesystem
702 703
703 704 isknown specifies whether the filename came from walking the
704 705 disk, to avoid extra filesystem access.
705 706
706 707 If ignoremissing is True, missing paths are returned
707 708 unchanged. Otherwise, we try harder to normalize possibly
708 709 existing path components.
709 710
710 711 The normalized case is determined based on the following precedence:
711 712
712 713 - version of name already stored in the dirstate
713 714 - version of name stored on disk
714 715 - version provided via command arguments
715 716 """
716 717
717 718 if self._checkcase:
718 719 return self._normalize(path, isknown, ignoremissing)
719 720 return path
720 721
721 722 def clear(self):
722 723 self._map.clear()
723 724 self._lastnormaltime = timestamp.zero()
724 725 self._dirty = True
725 726
726 727 def rebuild(self, parent, allfiles, changedfiles=None):
727 728 if changedfiles is None:
728 729 # Rebuild entire dirstate
729 730 to_lookup = allfiles
730 731 to_drop = []
731 732 lastnormaltime = self._lastnormaltime
732 733 self.clear()
733 734 self._lastnormaltime = lastnormaltime
734 735 elif len(changedfiles) < 10:
735 736 # Avoid turning allfiles into a set, which can be expensive if it's
736 737 # large.
737 738 to_lookup = []
738 739 to_drop = []
739 740 for f in changedfiles:
740 741 if f in allfiles:
741 742 to_lookup.append(f)
742 743 else:
743 744 to_drop.append(f)
744 745 else:
745 746 changedfilesset = set(changedfiles)
746 747 to_lookup = changedfilesset & set(allfiles)
747 748 to_drop = changedfilesset - to_lookup
748 749
749 750 if self._origpl is None:
750 751 self._origpl = self._pl
751 752 self._map.setparents(parent, self._nodeconstants.nullid)
752 753
753 754 for f in to_lookup:
754 755
755 756 if self.in_merge:
756 757 self.set_tracked(f)
757 758 else:
758 759 self._map.reset_state(
759 760 f,
760 761 wc_tracked=True,
761 762 p1_tracked=True,
762 763 )
763 764 for f in to_drop:
764 765 self._map.reset_state(f)
765 766
766 767 self._dirty = True
767 768
768 769 def identity(self):
769 770 """Return identity of dirstate itself to detect changing in storage
770 771
771 772 If identity of previous dirstate is equal to this, writing
772 773 changes based on the former dirstate out can keep consistency.
773 774 """
774 775 return self._map.identity
775 776
776 777 def write(self, tr):
777 778 if not self._dirty:
778 779 return
779 780
780 781 filename = self._filename
781 782 if tr:
782 783 # 'dirstate.write()' is not only for writing in-memory
783 784 # changes out, but also for dropping ambiguous timestamps.
784 785 # Delayed writing re-raises the "ambiguous timestamp issue".
785 786 # See also the wiki page below for detail:
786 787 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
787 788
788 789 # record when mtime start to be ambiguous
789 790 now = _getfsnow(self._opener)
790 791
791 792 # delay writing in-memory changes out
792 793 tr.addfilegenerator(
793 794 b'dirstate',
794 795 (self._filename,),
795 796 lambda f: self._writedirstate(tr, f, now=now),
796 797 location=b'plain',
797 798 )
798 799 return
799 800
800 801 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
801 802 self._writedirstate(tr, st)
802 803
803 804 def addparentchangecallback(self, category, callback):
804 805 """add a callback to be called when the wd parents are changed
805 806
806 807 Callback will be called with the following arguments:
807 808 dirstate, (oldp1, oldp2), (newp1, newp2)
808 809
809 810 Category is a unique identifier to allow overwriting an old callback
810 811 with a newer callback.
811 812 """
812 813 self._plchangecallbacks[category] = callback
813 814
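# Editorial note (not part of the changeset): a hedged sketch of registering a
# parent-change callback; the category bytestring is arbitrary and only lets a
# later registration replace an earlier one.
#
#     def _on_parents_moved(dirstate, old_parents, new_parents):
#         pass  # e.g. record (oldp1, oldp2) -> (newp1, newp2) somewhere
#
#     repo.dirstate.addparentchangecallback(b'my-ext', _on_parents_moved)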
814 815 def _writedirstate(self, tr, st, now=None):
815 816 # notify callbacks about parents change
816 817 if self._origpl is not None and self._origpl != self._pl:
817 818 for c, callback in sorted(
818 819 pycompat.iteritems(self._plchangecallbacks)
819 820 ):
820 821 callback(self, self._origpl, self._pl)
821 822 self._origpl = None
822 823
823 824 if now is None:
824 825 # use the modification time of the newly created temporary file as the
825 826 # filesystem's notion of 'now'
826 827 now = timestamp.mtime_of(util.fstat(st))
827 828
828 829 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
829 830 # the timestamp of each entry in the dirstate, because of 'now > mtime'
830 831 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
831 832 if delaywrite > 0:
832 833 # do we have any files to delay for?
833 834 for f, e in pycompat.iteritems(self._map):
834 835 if e.need_delay(now):
835 836 import time # to avoid useless import
836 837
837 838 # rather than sleep n seconds, sleep until the next
838 839 # multiple of n seconds
839 840 clock = time.time()
840 841 start = int(clock) - (int(clock) % delaywrite)
841 842 end = start + delaywrite
842 843 time.sleep(end - clock)
843 844 # trust our estimate that the end is near now
844 845 now = timestamp.timestamp((end, 0))
845 846 break
846 847
847 848 self._map.write(tr, st, now)
848 849 self._lastnormaltime = timestamp.zero()
849 850 self._dirty = False
850 851
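# Editorial worked example (not part of the changeset): with
# debug.dirstate.delaywrite=2 and time.time() == 103.4, the loop above computes
# start = 102 and end = 104, then sleeps 0.6s, i.e. it waits until the next
# multiple of the delay rather than a fixed 2 seconds, so the mtimes it writes
# can no longer be ambiguous with "now".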
851 852 def _dirignore(self, f):
852 853 if self._ignore(f):
853 854 return True
854 855 for p in pathutil.finddirs(f):
855 856 if self._ignore(p):
856 857 return True
857 858 return False
858 859
859 860 def _ignorefiles(self):
860 861 files = []
861 862 if os.path.exists(self._join(b'.hgignore')):
862 863 files.append(self._join(b'.hgignore'))
863 864 for name, path in self._ui.configitems(b"ui"):
864 865 if name == b'ignore' or name.startswith(b'ignore.'):
865 866 # we need to use os.path.join here rather than self._join
866 867 # because path is arbitrary and user-specified
867 868 files.append(os.path.join(self._rootdir, util.expandpath(path)))
868 869 return files
869 870
870 871 def _ignorefileandline(self, f):
871 872 files = collections.deque(self._ignorefiles())
872 873 visited = set()
873 874 while files:
874 875 i = files.popleft()
875 876 patterns = matchmod.readpatternfile(
876 877 i, self._ui.warn, sourceinfo=True
877 878 )
878 879 for pattern, lineno, line in patterns:
879 880 kind, p = matchmod._patsplit(pattern, b'glob')
880 881 if kind == b"subinclude":
881 882 if p not in visited:
882 883 files.append(p)
883 884 continue
884 885 m = matchmod.match(
885 886 self._root, b'', [], [pattern], warn=self._ui.warn
886 887 )
887 888 if m(f):
888 889 return (i, lineno, line)
889 890 visited.add(i)
890 891 return (None, -1, b"")
891 892
892 893 def _walkexplicit(self, match, subrepos):
893 894 """Get stat data about the files explicitly specified by match.
894 895
895 896 Return a triple (results, dirsfound, dirsnotfound).
896 897 - results is a mapping from filename to stat result. It also contains
897 898 listings mapping subrepos and .hg to None.
898 899 - dirsfound is a list of files found to be directories.
899 900 - dirsnotfound is a list of files that the dirstate thinks are
900 901 directories and that were not found."""
901 902
902 903 def badtype(mode):
903 904 kind = _(b'unknown')
904 905 if stat.S_ISCHR(mode):
905 906 kind = _(b'character device')
906 907 elif stat.S_ISBLK(mode):
907 908 kind = _(b'block device')
908 909 elif stat.S_ISFIFO(mode):
909 910 kind = _(b'fifo')
910 911 elif stat.S_ISSOCK(mode):
911 912 kind = _(b'socket')
912 913 elif stat.S_ISDIR(mode):
913 914 kind = _(b'directory')
914 915 return _(b'unsupported file type (type is %s)') % kind
915 916
916 917 badfn = match.bad
917 918 dmap = self._map
918 919 lstat = os.lstat
919 920 getkind = stat.S_IFMT
920 921 dirkind = stat.S_IFDIR
921 922 regkind = stat.S_IFREG
922 923 lnkkind = stat.S_IFLNK
923 924 join = self._join
924 925 dirsfound = []
925 926 foundadd = dirsfound.append
926 927 dirsnotfound = []
927 928 notfoundadd = dirsnotfound.append
928 929
929 930 if not match.isexact() and self._checkcase:
930 931 normalize = self._normalize
931 932 else:
932 933 normalize = None
933 934
934 935 files = sorted(match.files())
935 936 subrepos.sort()
936 937 i, j = 0, 0
937 938 while i < len(files) and j < len(subrepos):
938 939 subpath = subrepos[j] + b"/"
939 940 if files[i] < subpath:
940 941 i += 1
941 942 continue
942 943 while i < len(files) and files[i].startswith(subpath):
943 944 del files[i]
944 945 j += 1
945 946
946 947 if not files or b'' in files:
947 948 files = [b'']
948 949 # constructing the foldmap is expensive, so don't do it for the
949 950 # common case where files is ['']
950 951 normalize = None
951 952 results = dict.fromkeys(subrepos)
952 953 results[b'.hg'] = None
953 954
954 955 for ff in files:
955 956 if normalize:
956 957 nf = normalize(ff, False, True)
957 958 else:
958 959 nf = ff
959 960 if nf in results:
960 961 continue
961 962
962 963 try:
963 964 st = lstat(join(nf))
964 965 kind = getkind(st.st_mode)
965 966 if kind == dirkind:
966 967 if nf in dmap:
967 968 # file replaced by dir on disk but still in dirstate
968 969 results[nf] = None
969 970 foundadd((nf, ff))
970 971 elif kind == regkind or kind == lnkkind:
971 972 results[nf] = st
972 973 else:
973 974 badfn(ff, badtype(kind))
974 975 if nf in dmap:
975 976 results[nf] = None
976 977 except OSError as inst: # nf not found on disk - it is dirstate only
977 978 if nf in dmap: # does it exactly match a missing file?
978 979 results[nf] = None
979 980 else: # does it match a missing directory?
980 981 if self._map.hasdir(nf):
981 982 notfoundadd(nf)
982 983 else:
983 984 badfn(ff, encoding.strtolocal(inst.strerror))
984 985
985 986 # match.files() may contain explicitly-specified paths that shouldn't
986 987 # be taken; drop them from the list of files found. dirsfound/notfound
987 988 # aren't filtered here because they will be tested later.
988 989 if match.anypats():
989 990 for f in list(results):
990 991 if f == b'.hg' or f in subrepos:
991 992 # keep sentinel to disable further out-of-repo walks
992 993 continue
993 994 if not match(f):
994 995 del results[f]
995 996
996 997 # Case insensitive filesystems cannot rely on lstat() failing to detect
997 998 # a case-only rename. Prune the stat object for any file that does not
998 999 # match the case in the filesystem, if there are multiple files that
999 1000 # normalize to the same path.
1000 1001 if match.isexact() and self._checkcase:
1001 1002 normed = {}
1002 1003
1003 1004 for f, st in pycompat.iteritems(results):
1004 1005 if st is None:
1005 1006 continue
1006 1007
1007 1008 nc = util.normcase(f)
1008 1009 paths = normed.get(nc)
1009 1010
1010 1011 if paths is None:
1011 1012 paths = set()
1012 1013 normed[nc] = paths
1013 1014
1014 1015 paths.add(f)
1015 1016
1016 1017 for norm, paths in pycompat.iteritems(normed):
1017 1018 if len(paths) > 1:
1018 1019 for path in paths:
1019 1020 folded = self._discoverpath(
1020 1021 path, norm, True, None, self._map.dirfoldmap
1021 1022 )
1022 1023 if path != folded:
1023 1024 results[path] = None
1024 1025
1025 1026 return results, dirsfound, dirsnotfound
1026 1027
1027 1028 def walk(self, match, subrepos, unknown, ignored, full=True):
1028 1029 """
1029 1030 Walk recursively through the directory tree, finding all files
1030 1031 matched by match.
1031 1032
1032 1033 If full is False, maybe skip some known-clean files.
1033 1034
1034 1035 Return a dict mapping filename to stat-like object (either
1035 1036 mercurial.osutil.stat instance or return value of os.stat()).
1036 1037
1037 1038 """
1038 1039 # full is a flag that extensions that hook into walk can use -- this
1039 1040 # implementation doesn't use it at all. This satisfies the contract
1040 1041 # because we only guarantee a "maybe".
1041 1042
1042 1043 if ignored:
1043 1044 ignore = util.never
1044 1045 dirignore = util.never
1045 1046 elif unknown:
1046 1047 ignore = self._ignore
1047 1048 dirignore = self._dirignore
1048 1049 else:
1049 1050 # if not unknown and not ignored, drop dir recursion and step 2
1050 1051 ignore = util.always
1051 1052 dirignore = util.always
1052 1053
1053 1054 matchfn = match.matchfn
1054 1055 matchalways = match.always()
1055 1056 matchtdir = match.traversedir
1056 1057 dmap = self._map
1057 1058 listdir = util.listdir
1058 1059 lstat = os.lstat
1059 1060 dirkind = stat.S_IFDIR
1060 1061 regkind = stat.S_IFREG
1061 1062 lnkkind = stat.S_IFLNK
1062 1063 join = self._join
1063 1064
1064 1065 exact = skipstep3 = False
1065 1066 if match.isexact(): # match.exact
1066 1067 exact = True
1067 1068 dirignore = util.always # skip step 2
1068 1069 elif match.prefix(): # match.match, no patterns
1069 1070 skipstep3 = True
1070 1071
1071 1072 if not exact and self._checkcase:
1072 1073 normalize = self._normalize
1073 1074 normalizefile = self._normalizefile
1074 1075 skipstep3 = False
1075 1076 else:
1076 1077 normalize = self._normalize
1077 1078 normalizefile = None
1078 1079
1079 1080 # step 1: find all explicit files
1080 1081 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1081 1082 if matchtdir:
1082 1083 for d in work:
1083 1084 matchtdir(d[0])
1084 1085 for d in dirsnotfound:
1085 1086 matchtdir(d)
1086 1087
1087 1088 skipstep3 = skipstep3 and not (work or dirsnotfound)
1088 1089 work = [d for d in work if not dirignore(d[0])]
1089 1090
1090 1091 # step 2: visit subdirectories
1091 1092 def traverse(work, alreadynormed):
1092 1093 wadd = work.append
1093 1094 while work:
1094 1095 tracing.counter('dirstate.walk work', len(work))
1095 1096 nd = work.pop()
1096 1097 visitentries = match.visitchildrenset(nd)
1097 1098 if not visitentries:
1098 1099 continue
1099 1100 if visitentries == b'this' or visitentries == b'all':
1100 1101 visitentries = None
1101 1102 skip = None
1102 1103 if nd != b'':
1103 1104 skip = b'.hg'
1104 1105 try:
1105 1106 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1106 1107 entries = listdir(join(nd), stat=True, skip=skip)
1107 1108 except OSError as inst:
1108 1109 if inst.errno in (errno.EACCES, errno.ENOENT):
1109 1110 match.bad(
1110 1111 self.pathto(nd), encoding.strtolocal(inst.strerror)
1111 1112 )
1112 1113 continue
1113 1114 raise
1114 1115 for f, kind, st in entries:
1115 1116 # Some matchers may return files in the visitentries set,
1116 1117 # instead of 'this', if the matcher explicitly mentions them
1117 1118 # and is not an exactmatcher. This is acceptable; we do not
1118 1119 # make any hard assumptions about file-or-directory below
1119 1120 # based on the presence of `f` in visitentries. If
1120 1121 # visitchildrenset returned a set, we can always skip the
1121 1122 # entries *not* in the set it provided regardless of whether
1122 1123 # they're actually a file or a directory.
1123 1124 if visitentries and f not in visitentries:
1124 1125 continue
1125 1126 if normalizefile:
1126 1127 # even though f might be a directory, we're only
1127 1128 # interested in comparing it to files currently in the
1128 1129 # dmap -- therefore normalizefile is enough
1129 1130 nf = normalizefile(
1130 1131 nd and (nd + b"/" + f) or f, True, True
1131 1132 )
1132 1133 else:
1133 1134 nf = nd and (nd + b"/" + f) or f
1134 1135 if nf not in results:
1135 1136 if kind == dirkind:
1136 1137 if not ignore(nf):
1137 1138 if matchtdir:
1138 1139 matchtdir(nf)
1139 1140 wadd(nf)
1140 1141 if nf in dmap and (matchalways or matchfn(nf)):
1141 1142 results[nf] = None
1142 1143 elif kind == regkind or kind == lnkkind:
1143 1144 if nf in dmap:
1144 1145 if matchalways or matchfn(nf):
1145 1146 results[nf] = st
1146 1147 elif (matchalways or matchfn(nf)) and not ignore(
1147 1148 nf
1148 1149 ):
1149 1150 # unknown file -- normalize if necessary
1150 1151 if not alreadynormed:
1151 1152 nf = normalize(nf, False, True)
1152 1153 results[nf] = st
1153 1154 elif nf in dmap and (matchalways or matchfn(nf)):
1154 1155 results[nf] = None
1155 1156
1156 1157 for nd, d in work:
1157 1158 # alreadynormed means that processwork doesn't have to do any
1158 1159 # expensive directory normalization
1159 1160 alreadynormed = not normalize or nd == d
1160 1161 traverse([d], alreadynormed)
1161 1162
1162 1163 for s in subrepos:
1163 1164 del results[s]
1164 1165 del results[b'.hg']
1165 1166
1166 1167 # step 3: visit remaining files from dmap
1167 1168 if not skipstep3 and not exact:
1168 1169 # If a dmap file is not in results yet, it was either
1169 1170 # a) not matching matchfn b) ignored, c) missing, or d) under a
1170 1171 # symlink directory.
1171 1172 if not results and matchalways:
1172 1173 visit = [f for f in dmap]
1173 1174 else:
1174 1175 visit = [f for f in dmap if f not in results and matchfn(f)]
1175 1176 visit.sort()
1176 1177
1177 1178 if unknown:
1178 1179 # unknown == True means we walked all dirs under the roots
1179 1180 # that weren't ignored, and everything that matched was stat'ed
1180 1181 # and is already in results.
1181 1182 # The rest must thus be ignored or under a symlink.
1182 1183 audit_path = pathutil.pathauditor(self._root, cached=True)
1183 1184
1184 1185 for nf in iter(visit):
1185 1186 # If a stat for the same file was already added with a
1186 1187 # different case, don't add one for this, since that would
1187 1188 # make it appear as if the file exists under both names
1188 1189 # on disk.
1189 1190 if (
1190 1191 normalizefile
1191 1192 and normalizefile(nf, True, True) in results
1192 1193 ):
1193 1194 results[nf] = None
1194 1195 # Report ignored items in the dmap as long as they are not
1195 1196 # under a symlink directory.
1196 1197 elif audit_path.check(nf):
1197 1198 try:
1198 1199 results[nf] = lstat(join(nf))
1199 1200 # file was just ignored, no links, and exists
1200 1201 except OSError:
1201 1202 # file doesn't exist
1202 1203 results[nf] = None
1203 1204 else:
1204 1205 # It's either missing or under a symlink directory
1205 1206 # which we report as missing in this case
1206 1207 results[nf] = None
1207 1208 else:
1208 1209 # We may not have walked the full directory tree above,
1209 1210 # so stat and check everything we missed.
1210 1211 iv = iter(visit)
1211 1212 for st in util.statfiles([join(i) for i in visit]):
1212 1213 results[next(iv)] = st
1213 1214 return results
1214 1215
1215 1216 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1216 1217 # Force Rayon (Rust parallelism library) to respect the number of
1217 1218 # workers. This is a temporary workaround until Rust code knows
1218 1219 # how to read the config file.
1219 1220 numcpus = self._ui.configint(b"worker", b"numcpus")
1220 1221 if numcpus is not None:
1221 1222 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1222 1223
1223 1224 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1224 1225 if not workers_enabled:
1225 1226 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1226 1227
1227 1228 (
1228 1229 lookup,
1229 1230 modified,
1230 1231 added,
1231 1232 removed,
1232 1233 deleted,
1233 1234 clean,
1234 1235 ignored,
1235 1236 unknown,
1236 1237 warnings,
1237 1238 bad,
1238 1239 traversed,
1239 1240 dirty,
1240 1241 ) = rustmod.status(
1241 1242 self._map._map,
1242 1243 matcher,
1243 1244 self._rootdir,
1244 1245 self._ignorefiles(),
1245 1246 self._checkexec,
1246 1247 self._lastnormaltime,
1247 1248 bool(list_clean),
1248 1249 bool(list_ignored),
1249 1250 bool(list_unknown),
1250 1251 bool(matcher.traversedir),
1251 1252 )
1252 1253
1253 1254 self._dirty |= dirty
1254 1255
1255 1256 if matcher.traversedir:
1256 1257 for dir in traversed:
1257 1258 matcher.traversedir(dir)
1258 1259
1259 1260 if self._ui.warn:
1260 1261 for item in warnings:
1261 1262 if isinstance(item, tuple):
1262 1263 file_path, syntax = item
1263 1264 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1264 1265 file_path,
1265 1266 syntax,
1266 1267 )
1267 1268 self._ui.warn(msg)
1268 1269 else:
1269 1270 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1270 1271 self._ui.warn(
1271 1272 msg
1272 1273 % (
1273 1274 pathutil.canonpath(
1274 1275 self._rootdir, self._rootdir, item
1275 1276 ),
1276 1277 b"No such file or directory",
1277 1278 )
1278 1279 )
1279 1280
1280 1281 for (fn, message) in bad:
1281 1282 matcher.bad(fn, encoding.strtolocal(message))
1282 1283
1283 1284 status = scmutil.status(
1284 1285 modified=modified,
1285 1286 added=added,
1286 1287 removed=removed,
1287 1288 deleted=deleted,
1288 1289 unknown=unknown,
1289 1290 ignored=ignored,
1290 1291 clean=clean,
1291 1292 )
1292 1293 return (lookup, status)
1293 1294
1294 1295 def status(self, match, subrepos, ignored, clean, unknown):
1295 1296 """Determine the status of the working copy relative to the
1296 1297 dirstate and return a pair of (unsure, status), where status is of type
1297 1298 scmutil.status and:
1298 1299
1299 1300 unsure:
1300 1301 files that might have been modified since the dirstate was
1301 1302 written, but need to be read to be sure (size is the same
1302 1303 but mtime differs)
1303 1304 status.modified:
1304 1305 files that have definitely been modified since the dirstate
1305 1306 was written (different size or mode)
1306 1307 status.clean:
1307 1308 files that have definitely not been modified since the
1308 1309 dirstate was written
1309 1310 """
1310 1311 listignored, listclean, listunknown = ignored, clean, unknown
1311 1312 lookup, modified, added, unknown, ignored = [], [], [], [], []
1312 1313 removed, deleted, clean = [], [], []
1313 1314
1314 1315 dmap = self._map
1315 1316 dmap.preload()
1316 1317
1317 1318 use_rust = True
1318 1319
1319 1320 allowed_matchers = (
1320 1321 matchmod.alwaysmatcher,
1321 1322 matchmod.exactmatcher,
1322 1323 matchmod.includematcher,
1323 1324 )
1324 1325
1325 1326 if rustmod is None:
1326 1327 use_rust = False
1327 1328 elif self._checkcase:
1328 1329 # Case-insensitive filesystems are not handled yet
1329 1330 use_rust = False
1330 1331 elif subrepos:
1331 1332 use_rust = False
1332 1333 elif sparse.enabled:
1333 1334 use_rust = False
1334 1335 elif not isinstance(match, allowed_matchers):
1335 1336 # Some matchers have yet to be implemented
1336 1337 use_rust = False
1337 1338
1338 1339 if use_rust:
1339 1340 try:
1340 1341 return self._rust_status(
1341 1342 match, listclean, listignored, listunknown
1342 1343 )
1343 1344 except rustmod.FallbackError:
1344 1345 pass
1345 1346
1346 1347 def noop(f):
1347 1348 pass
1348 1349
1349 1350 dcontains = dmap.__contains__
1350 1351 dget = dmap.__getitem__
1351 1352 ladd = lookup.append # aka "unsure"
1352 1353 madd = modified.append
1353 1354 aadd = added.append
1354 1355 uadd = unknown.append if listunknown else noop
1355 1356 iadd = ignored.append if listignored else noop
1356 1357 radd = removed.append
1357 1358 dadd = deleted.append
1358 1359 cadd = clean.append if listclean else noop
1359 1360 mexact = match.exact
1360 1361 dirignore = self._dirignore
1361 1362 checkexec = self._checkexec
1362 1363 checklink = self._checklink
1363 1364 copymap = self._map.copymap
1364 1365 lastnormaltime = self._lastnormaltime
1365 1366
1366 1367 # We need to do full walks when either
1367 1368 # - we're listing all clean files, or
1368 1369 # - match.traversedir does something, because match.traversedir should
1369 1370 # be called for every dir in the working dir
1370 1371 full = listclean or match.traversedir is not None
1371 1372 for fn, st in pycompat.iteritems(
1372 1373 self.walk(match, subrepos, listunknown, listignored, full=full)
1373 1374 ):
1374 1375 if not dcontains(fn):
1375 1376 if (listignored or mexact(fn)) and dirignore(fn):
1376 1377 if listignored:
1377 1378 iadd(fn)
1378 1379 else:
1379 1380 uadd(fn)
1380 1381 continue
1381 1382
1382 1383 t = dget(fn)
1383 1384 mode = t.mode
1384 1385 size = t.size
1385 1386
1386 1387 if not st and t.tracked:
1387 1388 dadd(fn)
1388 1389 elif t.p2_info:
1389 1390 madd(fn)
1390 1391 elif t.added:
1391 1392 aadd(fn)
1392 1393 elif t.removed:
1393 1394 radd(fn)
1394 1395 elif t.tracked:
1395 1396 if not checklink and t.has_fallback_symlink:
1396 1397 # If the file system does not support symlink, the mode
1397 1398 # might not be correctly stored in the dirstate, so do not
1398 1399 # trust it.
1399 1400 ladd(fn)
1400 1401 elif not checkexec and t.has_fallback_exec:
1401 1402 # If the file system does not support exec bits, the mode
1402 1403 # might not be correctly stored in the dirstate, so do not
1403 1404 # trust it.
1404 1405 ladd(fn)
1405 1406 elif (
1406 1407 size >= 0
1407 1408 and (
1408 1409 (size != st.st_size and size != st.st_size & _rangemask)
1409 1410 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1410 1411 )
1411 1412 or fn in copymap
1412 1413 ):
1413 1414 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1414 1415 # issue6456: Size returned may be longer due to
1415 1416 # encryption on EXT-4 fscrypt, undecided.
1416 1417 ladd(fn)
1417 1418 else:
1418 1419 madd(fn)
1419 1420 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1420 1421 ladd(fn)
1421 1422 elif timestamp.mtime_of(st) == lastnormaltime:
1422 1423 # fn may have just been marked as normal and it may have
1423 1424 # changed in the same second without changing its size.
1424 1425 # This can happen if we quickly do multiple commits.
1425 1426 # Force lookup, so we don't miss such a racy file change.
1426 1427 ladd(fn)
1427 1428 elif listclean:
1428 1429 cadd(fn)
1429 1430 status = scmutil.status(
1430 1431 modified, added, removed, deleted, unknown, ignored, clean
1431 1432 )
1432 1433 return (lookup, status)
1433 1434
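# Editorial note (not part of the changeset): a hedged sketch of how callers
# typically consume the return value; `repo` and the matcher are assumptions.
#
#     unsure, s = repo.dirstate.status(
#         matchmod.always(), subrepos=[], ignored=False, clean=False, unknown=True
#     )
#     # files in `unsure` have an unchanged size but an untrustworthy mtime and
#     # must be re-read (content compared) before being reported clean.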
1434 1435 def matches(self, match):
1435 1436 """
1436 1437 return files in the dirstate (in whatever state) filtered by match
1437 1438 """
1438 1439 dmap = self._map
1439 1440 if rustmod is not None:
1440 1441 dmap = self._map._map
1441 1442
1442 1443 if match.always():
1443 1444 return dmap.keys()
1444 1445 files = match.files()
1445 1446 if match.isexact():
1446 1447 # fast path -- filter the other way around, since typically files is
1447 1448 # much smaller than dmap
1448 1449 return [f for f in files if f in dmap]
1449 1450 if match.prefix() and all(fn in dmap for fn in files):
1450 1451 # fast path -- all the values are known to be files, so just return
1451 1452 # that
1452 1453 return list(files)
1453 1454 return [f for f in dmap if match(f)]
1454 1455
1455 1456 def _actualfilename(self, tr):
1456 1457 if tr:
1457 1458 return self._pendingfilename
1458 1459 else:
1459 1460 return self._filename
1460 1461
1461 1462 def savebackup(self, tr, backupname):
1462 1463 '''Save current dirstate into backup file'''
1463 1464 filename = self._actualfilename(tr)
1464 1465 assert backupname != filename
1465 1466
1466 1467 # use '_writedirstate' instead of 'write' to write changes certainly,
1467 1468 # because the latter omits writing out if transaction is running.
1468 1469 # output file will be used to create backup of dirstate at this point.
1469 1470 if self._dirty or not self._opener.exists(filename):
1470 1471 self._writedirstate(
1471 1472 tr,
1472 1473 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1473 1474 )
1474 1475
1475 1476 if tr:
1476 1477 # ensure that subsequent tr.writepending returns True for
1477 1478 # changes written out above, even if dirstate is never
1478 1479 # changed after this
1479 1480 tr.addfilegenerator(
1480 1481 b'dirstate',
1481 1482 (self._filename,),
1482 1483 lambda f: self._writedirstate(tr, f),
1483 1484 location=b'plain',
1484 1485 )
1485 1486
1486 1487 # ensure that pending file written above is unlinked at
1487 1488 # failure, even if tr.writepending isn't invoked until the
1488 1489 # end of this transaction
1489 1490 tr.registertmp(filename, location=b'plain')
1490 1491
1491 1492 self._opener.tryunlink(backupname)
1492 1493 # hardlink backup is okay because _writedirstate is always called
1493 1494 # with an "atomictemp=True" file.
1494 1495 util.copyfile(
1495 1496 self._opener.join(filename),
1496 1497 self._opener.join(backupname),
1497 1498 hardlink=True,
1498 1499 )
1499 1500
1500 1501 def restorebackup(self, tr, backupname):
1501 1502 '''Restore dirstate by backup file'''
1502 1503 # this "invalidate()" prevents "wlock.release()" from writing
1503 1504 # changes of dirstate out after restoring from backup file
1504 1505 self.invalidate()
1505 1506 filename = self._actualfilename(tr)
1506 1507 o = self._opener
1507 1508 if util.samefile(o.join(backupname), o.join(filename)):
1508 1509 o.unlink(backupname)
1509 1510 else:
1510 1511 o.rename(backupname, filename, checkambig=True)
1511 1512
1512 1513 def clearbackup(self, tr, backupname):
1513 1514 '''Clear backup file'''
1514 1515 self._opener.unlink(backupname)
1515 1516
1516 1517 def verify(self, m1, m2):
1517 1518 """check the dirstate content against the parent manifest and yield errors"""
1518 1519 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1519 1520 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1520 1521 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1521 1522 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1522 1523 for f, entry in self.items():
1523 1524 state = entry.state
1524 1525 if state in b"nr" and f not in m1:
1525 1526 yield (missing_from_p1, f, state)
1526 1527 if state in b"a" and f in m1:
1527 1528 yield (unexpected_in_p1, f, state)
1528 1529 if state in b"m" and f not in m1 and f not in m2:
1529 1530 yield (missing_from_ps, f, state)
1530 1531 for f in m1:
1531 1532 state = self.get_entry(f).state
1532 1533 if state not in b"nrm":
1533 1534 yield (missing_from_ds, f, state)
@@ -1,935 +1,937 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11 import struct
12 12 import zlib
13 13
14 14 from ..node import (
15 15 nullrev,
16 16 sha1nodeconstants,
17 17 )
18 18 from ..thirdparty import attr
19 19 from .. import (
20 20 error,
21 21 pycompat,
22 22 revlogutils,
23 23 util,
24 24 )
25 25
26 26 from ..revlogutils import nodemap as nodemaputil
27 27 from ..revlogutils import constants as revlog_constants
28 28
29 29 stringio = pycompat.bytesio
30 30
31 31
32 32 _pack = struct.pack
33 33 _unpack = struct.unpack
34 34 _compress = zlib.compress
35 35 _decompress = zlib.decompress
36 36
37 37
38 38 # a special value used internally for `size` if the file comes from the other parent
39 39 FROM_P2 = -2
40 40
41 41 # a special value used internally for `size` if the file is modified/merged/added
42 42 NONNORMAL = -1
43 43
44 44 # a special value used internally for `time` if the time is ambiguous
45 45 AMBIGUOUS_TIME = -1
46 46
47 47 # Bits of the `flags` byte inside a node in the file format
48 48 DIRSTATE_V2_WDIR_TRACKED = 1 << 0
49 49 DIRSTATE_V2_P1_TRACKED = 1 << 1
50 50 DIRSTATE_V2_P2_INFO = 1 << 2
51 51 DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3
52 52 DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4
53 53 DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5
54 54 DIRSTATE_V2_FALLBACK_EXEC = 1 << 6
55 55 DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7
56 56 DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8
57 57 DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9
58 58 DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10
59 59 DIRSTATE_V2_HAS_MTIME = 1 << 11
60 60 DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12
61 61 DIRSTATE_V2_DIRECTORY = 1 << 13
62 62 DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14
63 63 DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15
64 64
65 65
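# Editorial note (not part of the changeset): the `flags` field of a
# dirstate-v2 node is a plain bit field built from the constants above.  For
# example, a clean tracked file with cached mode/size/mtime would carry
# (hedged sketch):
#
#     flags = (
#         DIRSTATE_V2_WDIR_TRACKED
#         | DIRSTATE_V2_P1_TRACKED
#         | DIRSTATE_V2_HAS_MODE_AND_SIZE
#         | DIRSTATE_V2_HAS_MTIME
#     )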
66 66 @attr.s(slots=True, init=False)
67 67 class DirstateItem(object):
68 68 """represent a dirstate entry
69 69
70 70 It holds multiple attributes
71 71
72 72 # about file tracking
73 73 - wc_tracked: is the file tracked by the working copy
74 74 - p1_tracked: is the file tracked in working copy first parent
75 75 - p2_info: the file has been involved in some merge operation. Either
76 76 because it was actually merged, or because the p2 version was
77 77 ahead, or because some rename moved it there. In either case
78 78 `hg status` will want it displayed as modified.
79 79
80 80 # about the file state expected from p1 manifest:
81 81 - mode: the file mode in p1
82 82 - size: the file size in p1
83 83
84 84 These values can be set to None, which means we don't have a meaningful value
85 85 to compare with. Either because we don't really care about them since the
86 86 `status` is known without having to look at the disk, or because we don't
87 87 know these right now and a full comparison will be needed to find out if
88 88 the file is clean.
89 89
90 90 # about the file state on disk last time we saw it:
91 91 - mtime: the last known clean mtime for the file.
92 92
93 93 This value can be set to None if no cacheable state exists. Either because we
94 94 do not care (see previous section) or because we could not cache something
95 95 yet.
96 96 """
97 97
98 98 _wc_tracked = attr.ib()
99 99 _p1_tracked = attr.ib()
100 100 _p2_info = attr.ib()
101 101 _mode = attr.ib()
102 102 _size = attr.ib()
103 103 _mtime_s = attr.ib()
104 104 _mtime_ns = attr.ib()
105 105 _fallback_exec = attr.ib()
106 106 _fallback_symlink = attr.ib()
107 107
108 108 def __init__(
109 109 self,
110 110 wc_tracked=False,
111 111 p1_tracked=False,
112 112 p2_info=False,
113 113 has_meaningful_data=True,
114 114 has_meaningful_mtime=True,
115 115 parentfiledata=None,
116 116 fallback_exec=None,
117 117 fallback_symlink=None,
118 118 ):
119 119 self._wc_tracked = wc_tracked
120 120 self._p1_tracked = p1_tracked
121 121 self._p2_info = p2_info
122 122
123 123 self._fallback_exec = fallback_exec
124 124 self._fallback_symlink = fallback_symlink
125 125
126 126 self._mode = None
127 127 self._size = None
128 128 self._mtime_s = None
129 129 self._mtime_ns = None
130 130 if parentfiledata is None:
131 131 has_meaningful_mtime = False
132 132 has_meaningful_data = False
133 elif parentfiledata[2] is None:
134 has_meaningful_mtime = False
133 135 if has_meaningful_data:
134 136 self._mode = parentfiledata[0]
135 137 self._size = parentfiledata[1]
136 138 if has_meaningful_mtime:
137 139 self._mtime_s, self._mtime_ns = parentfiledata[2]
138 140
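# Editorial note (not part of the changeset): with the two lines added above,
# the mtime slot of `parentfiledata` may itself be None, which keeps the
# cached mode/size but downgrades the entry to "mtime unknown".  Hedged
# sketch, values assumed:
#
#     item = DirstateItem(
#         wc_tracked=True,
#         p1_tracked=True,
#         parentfiledata=(0o644 | stat.S_IFREG, 12, None),
#     )
#     assert item.v1_mtime() == AMBIGUOUS_TIME  # no mtime is cached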
139 141 @classmethod
140 142 def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
141 143 """Build a new DirstateItem object from V2 data"""
142 144 has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
143 145 has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
144 146 if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS:
145 147 # The current code is not able to do the more subtle comparison that the
146 148 # MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
147 149 has_meaningful_mtime = False
148 150 mode = None
149 151
150 152 if flags & DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
151 153 # we do not have support for this flag in the code yet,
152 154 # force a lookup for this file.
153 155 has_mode_size = False
154 156 has_meaningful_mtime = False
155 157
156 158 fallback_exec = None
157 159 if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC:
158 160 fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC
159 161
160 162 fallback_symlink = None
161 163 if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK:
162 164 fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK
163 165
164 166 if has_mode_size:
165 167 assert stat.S_IXUSR == 0o100
166 168 if flags & DIRSTATE_V2_MODE_EXEC_PERM:
167 169 mode = 0o755
168 170 else:
169 171 mode = 0o644
170 172 if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
171 173 mode |= stat.S_IFLNK
172 174 else:
173 175 mode |= stat.S_IFREG
174 176 return cls(
175 177 wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
176 178 p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
177 179 p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
178 180 has_meaningful_data=has_mode_size,
179 181 has_meaningful_mtime=has_meaningful_mtime,
180 182 parentfiledata=(mode, size, (mtime_s, mtime_ns)),
181 183 fallback_exec=fallback_exec,
182 184 fallback_symlink=fallback_symlink,
183 185 )
184 186
185 187 @classmethod
186 188 def from_v1_data(cls, state, mode, size, mtime):
187 189 """Build a new DirstateItem object from V1 data
188 190
189 191 Since the dirstate-v1 format is frozen, the signature of this function
190 192 is not expected to change, unlike the __init__ one.
191 193 """
192 194 if state == b'm':
193 195 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
194 196 elif state == b'a':
195 197 return cls(wc_tracked=True)
196 198 elif state == b'r':
197 199 if size == NONNORMAL:
198 200 p1_tracked = True
199 201 p2_info = True
200 202 elif size == FROM_P2:
201 203 p1_tracked = False
202 204 p2_info = True
203 205 else:
204 206 p1_tracked = True
205 207 p2_info = False
206 208 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
207 209 elif state == b'n':
208 210 if size == FROM_P2:
209 211 return cls(wc_tracked=True, p2_info=True)
210 212 elif size == NONNORMAL:
211 213 return cls(wc_tracked=True, p1_tracked=True)
212 214 elif mtime == AMBIGUOUS_TIME:
213 215 return cls(
214 216 wc_tracked=True,
215 217 p1_tracked=True,
216 218 has_meaningful_mtime=False,
217 219 parentfiledata=(mode, size, (42, 0)),
218 220 )
219 221 else:
220 222 return cls(
221 223 wc_tracked=True,
222 224 p1_tracked=True,
223 225 parentfiledata=(mode, size, (mtime, 0)),
224 226 )
225 227 else:
226 228 raise RuntimeError(b'unknown state: %s' % state)
227 229
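# Editorial summary (not part of the changeset) of how the v1 state characters
# map onto the new attributes, per the branches above:
#
#     b'a' -> wc_tracked only                                  (added)
#     b'r' -> p1_tracked and/or p2_info, wc_tracked unset      (removed)
#     b'm' -> wc_tracked + p1_tracked + p2_info                (merged)
#     b'n' -> wc_tracked + p1_tracked, or wc_tracked + p2_info when size is
#             the FROM_P2 marker; mode/size/mtime are cached when clean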
228 230 def set_possibly_dirty(self):
229 231 """Mark a file as "possibly dirty"
230 232
231 233 This means the next status call will have to actually check its content
232 234 to make sure it is correct.
233 235 """
234 236 self._mtime_s = None
235 237 self._mtime_ns = None
236 238
237 239 def set_clean(self, mode, size, mtime):
238 240 """mark a file as "clean", cancelling a potential "possibly dirty" call
239 241
240 242 Note: this function is a descendant of `dirstate.normal` and is
241 243 currently expected to be called on "normal" entries only. There is no
242 244 reason for this not to change in the future, as long as the code is
243 245 updated to preserve the proper state of the non-normal files.
244 246 """
245 247 self._wc_tracked = True
246 248 self._p1_tracked = True
247 249 self._mode = mode
248 250 self._size = size
249 251 self._mtime_s, self._mtime_ns = mtime
250 252
251 253 def set_tracked(self):
252 254 """mark a file as tracked in the working copy
253 255
254 256 This will ultimately be called by command like `hg add`.
255 257 """
256 258 self._wc_tracked = True
257 259 # `set_tracked` is replacing various `normallookup` calls. So we mark
258 260 # the files as needing lookup
259 261 #
260 262 # Consider dropping this in the future in favor of something less broad.
261 263 self._mtime_s = None
262 264 self._mtime_ns = None
263 265
264 266 def set_untracked(self):
265 267 """mark a file as untracked in the working copy
266 268
267 269 This will ultimately be called by command like `hg remove`.
268 270 """
269 271 self._wc_tracked = False
270 272 self._mode = None
271 273 self._size = None
272 274 self._mtime_s = None
273 275 self._mtime_ns = None
274 276
275 277 def drop_merge_data(self):
276 278 """remove all "merge-only" from a DirstateItem
277 279
278 280 This is to be called by the dirstatemap code when the second parent is dropped
279 281 """
280 282 if self._p2_info:
281 283 self._p2_info = False
282 284 self._mode = None
283 285 self._size = None
284 286 self._mtime_s = None
285 287 self._mtime_ns = None
286 288
287 289 @property
288 290 def mode(self):
289 291 return self.v1_mode()
290 292
291 293 @property
292 294 def size(self):
293 295 return self.v1_size()
294 296
295 297 @property
296 298 def mtime(self):
297 299 return self.v1_mtime()
298 300
299 301 def mtime_likely_equal_to(self, other_mtime):
300 302 self_sec = self._mtime_s
301 303 if self_sec is None:
302 304 return False
303 305 self_ns = self._mtime_ns
304 306 other_sec, other_ns = other_mtime
305 307 return self_sec == other_sec and (
306 308 self_ns == other_ns or self_ns == 0 or other_ns == 0
307 309 )
308 310
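# Editorial note (not part of the changeset): the comparison above treats a
# zero nanosecond field as "sub-second precision unknown", so it matches any
# nanosecond value within the same second.  Hedged sketch, values assumed:
#
#     item = DirstateItem(
#         wc_tracked=True,
#         p1_tracked=True,
#         parentfiledata=(0o644 | stat.S_IFREG, 12, (1000, 0)),
#     )
#     assert item.mtime_likely_equal_to((1000, 345))      # ns of 0 matches any
#     assert not item.mtime_likely_equal_to((1001, 345))  # different second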
309 311 @property
310 312 def state(self):
311 313 """
312 314 States are:
313 315 n normal
314 316 m needs merging
315 317 r marked for removal
316 318 a marked for addition
317 319
318 320 XXX This "state" is a bit obscure and mostly a direct expression of the
319 321 dirstatev1 format. It would make sense to ultimately deprecate it in
320 322 favor of the more "semantic" attributes.
321 323 """
322 324 if not self.any_tracked:
323 325 return b'?'
324 326 return self.v1_state()
325 327
326 328 @property
327 329 def has_fallback_exec(self):
328 330 """True if "fallback" information is available for the "exec" bit
329 331
330 332 Fallback information can be stored in the dirstate to keep track of a
331 333 filesystem attribute tracked by Mercurial when the underlying file
332 334 system or operating system does not support that property (e.g.
333 335 Windows).
334 336
335 337 Not all versions of the dirstate on-disk storage support preserving this
336 338 information.
337 339 """
338 340 return self._fallback_exec is not None
339 341
340 342 @property
341 343 def fallback_exec(self):
342 344 """ "fallback" information for the executable bit
343 345
344 346 True if the file should be considered executable when we cannot get
345 347 this information from the file system. False if it should be
346 348 considered non-executable.
347 349
348 350 See has_fallback_exec for details."""
349 351 return self._fallback_exec
350 352
351 353 @fallback_exec.setter
352 354 def set_fallback_exec(self, value):
353 355 """control "fallback" executable bit
354 356
355 357 Set to:
356 358 - True if the file should be considered executable,
357 359 - False if the file should be considered non-executable,
358 360 - None if we do not have valid fallback data.
359 361
360 362 See has_fallback_exec for details."""
361 363 if value is None:
362 364 self._fallback_exec = None
363 365 else:
364 366 self._fallback_exec = bool(value)
365 367
366 368 @property
367 369 def has_fallback_symlink(self):
368 370 """True if "fallback" information is available for symlink status
369 371
370 372 Fallback information can be stored in the dirstate to keep track of a
371 373 filesystem attribute tracked by Mercurial when the underlying file
372 374 system or operating system does not support that property (e.g.
373 375 Windows).
374 376
375 377 Not all versions of the dirstate on-disk storage support preserving this
376 378 information."""
377 379 return self._fallback_symlink is not None
378 380
379 381 @property
380 382 def fallback_symlink(self):
381 383 """ "fallback" information for symlink status
382 384
383 385 True if the file should be considered a symlink when we cannot get
384 386 this information from the file system. False if it should be
385 387 considered not a symlink.
386 388
387 389 See has_fallback_symlink for details."""
388 390 return self._fallback_symlink
389 391
390 392 @fallback_symlink.setter
391 393 def set_fallback_symlink(self, value):
392 394 """control "fallback" symlink status
393 395
394 396 Set to:
395 397 - True if the file should be considered a symlink,
396 398 - False if the file should be considered not a symlink,
397 399 - None if we do not have valid fallback data.
398 400
399 401 See has_fallback_symlink for details."""
400 402 if value is None:
401 403 self._fallback_symlink = None
402 404 else:
403 405 self._fallback_symlink = bool(value)
404 406
405 407 @property
406 408 def tracked(self):
407 409 """True if the file is tracked in the working copy"""
408 410 return self._wc_tracked
409 411
410 412 @property
411 413 def any_tracked(self):
412 414 """True if the file is tracked anywhere (wc or parents)"""
413 415 return self._wc_tracked or self._p1_tracked or self._p2_info
414 416
415 417 @property
416 418 def added(self):
417 419 """True if the file has been added"""
418 420 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
419 421
420 422 @property
421 423 def maybe_clean(self):
422 424 """True if the file has a chance to be in the "clean" state"""
423 425 if not self._wc_tracked:
424 426 return False
425 427 elif not self._p1_tracked:
426 428 return False
427 429 elif self._p2_info:
428 430 return False
429 431 return True
430 432
431 433 @property
432 434 def p1_tracked(self):
433 435 """True if the file is tracked in the first parent manifest"""
434 436 return self._p1_tracked
435 437
436 438 @property
437 439 def p2_info(self):
438 440 """True if the file needed to merge or apply any input from p2
439 441
440 442 See the class documentation for details.
441 443 """
442 444 return self._wc_tracked and self._p2_info
443 445
444 446 @property
445 447 def removed(self):
446 448 """True if the file has been removed"""
447 449 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
448 450
449 451 def v2_data(self):
450 452 """Returns (flags, mode, size, mtime) for v2 serialization"""
451 453 flags = 0
452 454 if self._wc_tracked:
453 455 flags |= DIRSTATE_V2_WDIR_TRACKED
454 456 if self._p1_tracked:
455 457 flags |= DIRSTATE_V2_P1_TRACKED
456 458 if self._p2_info:
457 459 flags |= DIRSTATE_V2_P2_INFO
458 460 if self._mode is not None and self._size is not None:
459 461 flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
460 462 if self.mode & stat.S_IXUSR:
461 463 flags |= DIRSTATE_V2_MODE_EXEC_PERM
462 464 if stat.S_ISLNK(self.mode):
463 465 flags |= DIRSTATE_V2_MODE_IS_SYMLINK
464 466 if self._mtime_s is not None:
465 467 flags |= DIRSTATE_V2_HAS_MTIME
466 468
467 469 if self._fallback_exec is not None:
468 470 flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
469 471 if self._fallback_exec:
470 472 flags |= DIRSTATE_V2_FALLBACK_EXEC
471 473
472 474 if self._fallback_symlink is not None:
473 475 flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK
474 476 if self._fallback_symlink:
475 477 flags |= DIRSTATE_V2_FALLBACK_SYMLINK
476 478
477 479 # Note: we do not need to do anything regarding
478 480 # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED
479 481 # since we never set _DIRSTATE_V2_HAS_DIRECTORY_MTIME
480 482 return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0)
481 483
482 484 def v1_state(self):
483 485 """return a "state" suitable for v1 serialization"""
484 486 if not self.any_tracked:
485 487 # the object has no state to record, this is -currently-
486 488 # unsupported
487 489 raise RuntimeError('untracked item')
488 490 elif self.removed:
489 491 return b'r'
490 492 elif self._p1_tracked and self._p2_info:
491 493 return b'm'
492 494 elif self.added:
493 495 return b'a'
494 496 else:
495 497 return b'n'
496 498
497 499 def v1_mode(self):
498 500 """return a "mode" suitable for v1 serialization"""
499 501 return self._mode if self._mode is not None else 0
500 502
501 503 def v1_size(self):
502 504 """return a "size" suitable for v1 serialization"""
503 505 if not self.any_tracked:
504 506 # the object has no state to record, this is -currently-
505 507 # unsupported
506 508 raise RuntimeError('untracked item')
507 509 elif self.removed and self._p1_tracked and self._p2_info:
508 510 return NONNORMAL
509 511 elif self._p2_info:
510 512 return FROM_P2
511 513 elif self.removed:
512 514 return 0
513 515 elif self.added:
514 516 return NONNORMAL
515 517 elif self._size is None:
516 518 return NONNORMAL
517 519 else:
518 520 return self._size
519 521
520 522 def v1_mtime(self):
521 523 """return a "mtime" suitable for v1 serialization"""
522 524 if not self.any_tracked:
523 525 # the object has no state to record, this is -currently-
524 526 # unsupported
525 527 raise RuntimeError('untracked item')
526 528 elif self.removed:
527 529 return 0
528 530 elif self._mtime_s is None:
529 531 return AMBIGUOUS_TIME
530 532 elif self._p2_info:
531 533 return AMBIGUOUS_TIME
532 534 elif not self._p1_tracked:
533 535 return AMBIGUOUS_TIME
534 536 else:
535 537 return self._mtime_s
536 538
537 539 def need_delay(self, now):
538 540 """True if the stored mtime would be ambiguous with the current time"""
539 541 return self.v1_state() == b'n' and self._mtime_s == now[0]
540 542
541 543
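The DirstateItem mapping above from (wc_tracked, p1_tracked, p2_info, size) to the v1 state/size fields is compact but easy to misread. Below is a minimal standalone sketch of that mapping, not the mercurial API itself; the helper name `v1_fields` is illustrative, and NONNORMAL/FROM_P2 are assumed to be -1/-2, matching the v1 constants used elsewhere in this file.

NONNORMAL = -1
FROM_P2 = -2

def v1_fields(wc_tracked, p1_tracked, p2_info, size=None):
    """Return (state, size) the way v1_state()/v1_size() above would.

    (The real code raises for items that are not tracked anywhere.)"""
    removed = not wc_tracked and (p1_tracked or p2_info)
    added = wc_tracked and not (p1_tracked or p2_info)
    if removed:
        state = b'r'
    elif p1_tracked and p2_info:
        state = b'm'
    elif added:
        state = b'a'
    else:
        state = b'n'
    if removed and p1_tracked and p2_info:
        v1_size = NONNORMAL
    elif p2_info:
        v1_size = FROM_P2
    elif removed:
        v1_size = 0
    elif added or size is None:
        v1_size = NONNORMAL
    else:
        v1_size = size
    return state, v1_size

# a freshly added file:   v1_fields(True, False, False)           -> (b'a', -1)
# a merge with p2 input:  v1_fields(True, True, True)             -> (b'm', -2)
# a clean tracked file:   v1_fields(True, True, False, size=1024) -> (b'n', 1024)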
542 544 def gettype(q):
543 545 return int(q & 0xFFFF)
544 546
545 547
546 548 class BaseIndexObject(object):
547 549 # Can I be passed to an algorithm implemented in Rust?
548 550 rust_ext_compat = 0
549 551 # Format of an index entry according to Python's `struct` language
550 552 index_format = revlog_constants.INDEX_ENTRY_V1
551 553 # Size of a C unsigned long long int, platform independent
552 554 big_int_size = struct.calcsize(b'>Q')
553 555 # Size of a C long int, platform independent
554 556 int_size = struct.calcsize(b'>i')
555 557 # An empty index entry, used as a default value to be overridden, or nullrev
556 558 null_item = (
557 559 0,
558 560 0,
559 561 0,
560 562 -1,
561 563 -1,
562 564 -1,
563 565 -1,
564 566 sha1nodeconstants.nullid,
565 567 0,
566 568 0,
567 569 revlog_constants.COMP_MODE_INLINE,
568 570 revlog_constants.COMP_MODE_INLINE,
569 571 )
570 572
571 573 @util.propertycache
572 574 def entry_size(self):
573 575 return self.index_format.size
574 576
575 577 @property
576 578 def nodemap(self):
577 579 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
578 580 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
579 581 return self._nodemap
580 582
581 583 @util.propertycache
582 584 def _nodemap(self):
583 585 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
584 586 for r in range(0, len(self)):
585 587 n = self[r][7]
586 588 nodemap[n] = r
587 589 return nodemap
588 590
589 591 def has_node(self, node):
590 592 """return True if the node exist in the index"""
591 593 return node in self._nodemap
592 594
593 595 def rev(self, node):
594 596 """return a revision for a node
595 597
596 598 If the node is unknown, raise a RevlogError"""
597 599 return self._nodemap[node]
598 600
599 601 def get_rev(self, node):
600 602 """return a revision for a node
601 603
602 604 If the node is unknown, return None"""
603 605 return self._nodemap.get(node)
604 606
605 607 def _stripnodes(self, start):
606 608 if '_nodemap' in vars(self):
607 609 for r in range(start, len(self)):
608 610 n = self[r][7]
609 611 del self._nodemap[n]
610 612
611 613 def clearcaches(self):
612 614 self.__dict__.pop('_nodemap', None)
613 615
614 616 def __len__(self):
615 617 return self._lgt + len(self._extra)
616 618
617 619 def append(self, tup):
618 620 if '_nodemap' in vars(self):
619 621 self._nodemap[tup[7]] = len(self)
620 622 data = self._pack_entry(len(self), tup)
621 623 self._extra.append(data)
622 624
623 625 def _pack_entry(self, rev, entry):
624 626 assert entry[8] == 0
625 627 assert entry[9] == 0
626 628 return self.index_format.pack(*entry[:8])
627 629
628 630 def _check_index(self, i):
629 631 if not isinstance(i, int):
630 632 raise TypeError(b"expecting int indexes")
631 633 if i < 0 or i >= len(self):
632 634 raise IndexError
633 635
634 636 def __getitem__(self, i):
635 637 if i == -1:
636 638 return self.null_item
637 639 self._check_index(i)
638 640 if i >= self._lgt:
639 641 data = self._extra[i - self._lgt]
640 642 else:
641 643 index = self._calculate_index(i)
642 644 data = self._data[index : index + self.entry_size]
643 645 r = self._unpack_entry(i, data)
644 646 if self._lgt and i == 0:
645 647 offset = revlogutils.offset_type(0, gettype(r[0]))
646 648 r = (offset,) + r[1:]
647 649 return r
648 650
649 651 def _unpack_entry(self, rev, data):
650 652 r = self.index_format.unpack(data)
651 653 r = r + (
652 654 0,
653 655 0,
654 656 revlog_constants.COMP_MODE_INLINE,
655 657 revlog_constants.COMP_MODE_INLINE,
656 658 )
657 659 return r
658 660
659 661 def pack_header(self, header):
660 662 """pack header information as binary"""
661 663 v_fmt = revlog_constants.INDEX_HEADER
662 664 return v_fmt.pack(header)
663 665
664 666 def entry_binary(self, rev):
665 667 """return the raw binary string representing a revision"""
666 668 entry = self[rev]
667 669 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
668 670 if rev == 0:
669 671 p = p[revlog_constants.INDEX_HEADER.size :]
670 672 return p
671 673
672 674
673 675 class IndexObject(BaseIndexObject):
674 676 def __init__(self, data):
675 677 assert len(data) % self.entry_size == 0, (
676 678 len(data),
677 679 self.entry_size,
678 680 len(data) % self.entry_size,
679 681 )
680 682 self._data = data
681 683 self._lgt = len(data) // self.entry_size
682 684 self._extra = []
683 685
684 686 def _calculate_index(self, i):
685 687 return i * self.entry_size
686 688
687 689 def __delitem__(self, i):
688 690 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
689 691 raise ValueError(b"deleting slices only supports a:-1 with step 1")
690 692 i = i.start
691 693 self._check_index(i)
692 694 self._stripnodes(i)
693 695 if i < self._lgt:
694 696 self._data = self._data[: i * self.entry_size]
695 697 self._lgt = i
696 698 self._extra = []
697 699 else:
698 700 self._extra = self._extra[: i - self._lgt]
699 701
700 702
701 703 class PersistentNodeMapIndexObject(IndexObject):
702 704 """a Debug oriented class to test persistent nodemap
703 705
704 706 We need a simple python object to test API and higher level behavior. See
705 707 the Rust implementation for more serious usage. This should be used only
706 708 through the dedicated `devel.persistent-nodemap` config.
707 709 """
708 710
709 711 def nodemap_data_all(self):
710 712 """Return bytes containing a full serialization of a nodemap
711 713
712 714 The nodemap should be valid for the full set of revisions in the
713 715 index."""
714 716 return nodemaputil.persistent_data(self)
715 717
716 718 def nodemap_data_incremental(self):
717 719 """Return bytes containing a incremental update to persistent nodemap
718 720
719 721 This containst the data for an append-only update of the data provided
720 722 in the last call to `update_nodemap_data`.
721 723 """
722 724 if self._nm_root is None:
723 725 return None
724 726 docket = self._nm_docket
725 727 changed, data = nodemaputil.update_persistent_data(
726 728 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
727 729 )
728 730
729 731 self._nm_root = self._nm_max_idx = self._nm_docket = None
730 732 return docket, changed, data
731 733
732 734 def update_nodemap_data(self, docket, nm_data):
733 735 """provide full block of persisted binary data for a nodemap
734 736
735 737 The data are expected to come from disk. See `nodemap_data_all` for a
736 738 producer of such data."""
737 739 if nm_data is not None:
738 740 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
739 741 if self._nm_root:
740 742 self._nm_docket = docket
741 743 else:
742 744 self._nm_root = self._nm_max_idx = self._nm_docket = None
743 745
744 746
745 747 class InlinedIndexObject(BaseIndexObject):
746 748 def __init__(self, data, inline=0):
747 749 self._data = data
748 750 self._lgt = self._inline_scan(None)
749 751 self._inline_scan(self._lgt)
750 752 self._extra = []
751 753
752 754 def _inline_scan(self, lgt):
753 755 off = 0
754 756 if lgt is not None:
755 757 self._offsets = [0] * lgt
756 758 count = 0
757 759 while off <= len(self._data) - self.entry_size:
758 760 start = off + self.big_int_size
759 761 (s,) = struct.unpack(
760 762 b'>i',
761 763 self._data[start : start + self.int_size],
762 764 )
763 765 if lgt is not None:
764 766 self._offsets[count] = off
765 767 count += 1
766 768 off += self.entry_size + s
767 769 if off != len(self._data):
768 770 raise ValueError(b"corrupted data")
769 771 return count
770 772
771 773 def __delitem__(self, i):
772 774 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
773 775 raise ValueError(b"deleting slices only supports a:-1 with step 1")
774 776 i = i.start
775 777 self._check_index(i)
776 778 self._stripnodes(i)
777 779 if i < self._lgt:
778 780 self._offsets = self._offsets[:i]
779 781 self._lgt = i
780 782 self._extra = []
781 783 else:
782 784 self._extra = self._extra[: i - self._lgt]
783 785
784 786 def _calculate_index(self, i):
785 787 return self._offsets[i]
786 788
787 789
788 790 def parse_index2(data, inline, revlogv2=False):
789 791 if not inline:
790 792 cls = IndexObject2 if revlogv2 else IndexObject
791 793 return cls(data), None
792 794 cls = InlinedIndexObject
793 795 return cls(data, inline), (0, data)
794 796
795 797
796 798 def parse_index_cl_v2(data):
797 799 return IndexChangelogV2(data), None
798 800
799 801
800 802 class IndexObject2(IndexObject):
801 803 index_format = revlog_constants.INDEX_ENTRY_V2
802 804
803 805 def replace_sidedata_info(
804 806 self,
805 807 rev,
806 808 sidedata_offset,
807 809 sidedata_length,
808 810 offset_flags,
809 811 compression_mode,
810 812 ):
811 813 """
812 814 Replace an existing index entry's sidedata offset and length with new
813 815 ones.
814 816 This cannot be used outside of the context of sidedata rewriting,
815 817 inside the transaction that creates the revision `rev`.
816 818 """
817 819 if rev < 0:
818 820 raise KeyError
819 821 self._check_index(rev)
820 822 if rev < self._lgt:
821 823 msg = b"cannot rewrite entries outside of this transaction"
822 824 raise KeyError(msg)
823 825 else:
824 826 entry = list(self[rev])
825 827 entry[0] = offset_flags
826 828 entry[8] = sidedata_offset
827 829 entry[9] = sidedata_length
828 830 entry[11] = compression_mode
829 831 entry = tuple(entry)
830 832 new = self._pack_entry(rev, entry)
831 833 self._extra[rev - self._lgt] = new
832 834
833 835 def _unpack_entry(self, rev, data):
834 836 data = self.index_format.unpack(data)
835 837 entry = data[:10]
836 838 data_comp = data[10] & 3
837 839 sidedata_comp = (data[10] & (3 << 2)) >> 2
838 840 return entry + (data_comp, sidedata_comp)
839 841
840 842 def _pack_entry(self, rev, entry):
841 843 data = entry[:10]
842 844 data_comp = entry[10] & 3
843 845 sidedata_comp = (entry[11] & 3) << 2
844 846 data += (data_comp | sidedata_comp,)
845 847
846 848 return self.index_format.pack(*data)
847 849
848 850 def entry_binary(self, rev):
849 851 """return the raw binary string representing a revision"""
850 852 entry = self[rev]
851 853 return self._pack_entry(rev, entry)
852 854
853 855 def pack_header(self, header):
854 856 """pack header information as binary"""
855 857 msg = 'version header should go in the docket, not the index: %d'
856 858 msg %= header
857 859 raise error.ProgrammingError(msg)
858 860
859 861
860 862 class IndexChangelogV2(IndexObject2):
861 863 index_format = revlog_constants.INDEX_ENTRY_CL_V2
862 864
863 865 def _unpack_entry(self, rev, data, r=True):
864 866 items = self.index_format.unpack(data)
865 867 entry = items[:3] + (rev, rev) + items[3:8]
866 868 data_comp = items[8] & 3
867 869 sidedata_comp = (items[8] >> 2) & 3
868 870 return entry + (data_comp, sidedata_comp)
869 871
870 872 def _pack_entry(self, rev, entry):
871 873 assert entry[3] == rev, entry[3]
872 874 assert entry[4] == rev, entry[4]
873 875 data = entry[:3] + entry[5:10]
874 876 data_comp = entry[10] & 3
875 877 sidedata_comp = (entry[11] & 3) << 2
876 878 data += (data_comp | sidedata_comp,)
877 879 return self.index_format.pack(*data)
878 880
879 881
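Both `_pack_entry` implementations above fold the two compression modes into a single field: the data compression mode in bits 0-1 and the sidedata compression mode in bits 2-3. A minimal standalone sketch of that bit layout (helper names are illustrative, not part of the module):

def pack_comp_modes(data_comp, sidedata_comp):
    # data mode in bits 0-1, sidedata mode in bits 2-3, mirroring the
    # `data_comp | sidedata_comp` expression in _pack_entry above
    return (data_comp & 3) | ((sidedata_comp & 3) << 2)

def unpack_comp_modes(field):
    # the inverse operation performed by _unpack_entry above
    return field & 3, (field >> 2) & 3

assert unpack_comp_modes(pack_comp_modes(1, 2)) == (1, 2)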
880 882 def parse_index_devel_nodemap(data, inline):
881 883 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
882 884 return PersistentNodeMapIndexObject(data), None
883 885
884 886
885 887 def parse_dirstate(dmap, copymap, st):
886 888 parents = [st[:20], st[20:40]]
887 889 # dereference fields so they will be local in loop
888 890 format = b">cllll"
889 891 e_size = struct.calcsize(format)
890 892 pos1 = 40
891 893 l = len(st)
892 894
893 895 # the inner loop
894 896 while pos1 < l:
895 897 pos2 = pos1 + e_size
896 898 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
897 899 pos1 = pos2 + e[4]
898 900 f = st[pos2:pos1]
899 901 if b'\0' in f:
900 902 f, c = f.split(b'\0')
901 903 copymap[f] = c
902 904 dmap[f] = DirstateItem.from_v1_data(*e[:4])
903 905 return parents
904 906
905 907
906 908 def pack_dirstate(dmap, copymap, pl, now):
907 909 cs = stringio()
908 910 write = cs.write
909 911 write(b"".join(pl))
910 912 for f, e in pycompat.iteritems(dmap):
911 913 if e.need_delay(now):
912 914 # The file was last modified "simultaneously" with the current
913 915 # write to dirstate (i.e. within the same second for file-
914 916 # systems with a granularity of 1 sec). This commonly happens
915 917 # for at least a couple of files on 'update'.
916 918 # The user could change the file without changing its size
917 919 # within the same second. Invalidate the file's mtime in
918 920 # dirstate, forcing future 'status' calls to compare the
919 921 # contents of the file if the size is the same. This prevents
920 922 # mistakenly treating such files as clean.
921 923 e.set_possibly_dirty()
922 924
923 925 if f in copymap:
924 926 f = b"%s\0%s" % (f, copymap[f])
925 927 e = _pack(
926 928 b">cllll",
927 929 e.v1_state(),
928 930 e.v1_mode(),
929 931 e.v1_size(),
930 932 e.v1_mtime(),
931 933 len(f),
932 934 )
933 935 write(e)
934 936 write(f)
935 937 return cs.getvalue()
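To make the v1 layout used by parse_dirstate/pack_dirstate above concrete: after the two 20-byte parent nodes, each entry is a `>cllll` record (state, mode, size, mtime, filename length) followed by the filename, with an optional NUL-separated copy source appended to it. Below is a minimal standalone sketch of packing one record, including the mtime invalidation that `need_delay` triggers; AMBIGUOUS_TIME is assumed to be -1, matching the v1 "ambiguous mtime" convention above.

import struct

AMBIGUOUS_TIME = -1

def pack_one_v1_record(state, mode, size, mtime, filename, now_s, copy_source=None):
    if state == b'n' and mtime == now_s:
        # written in the same second as "now": drop the mtime so a later
        # status run compares file contents instead of trusting the cache
        mtime = AMBIGUOUS_TIME
    if copy_source is not None:
        filename = filename + b'\0' + copy_source
    header = struct.pack(b'>cllll', state, mode, size, mtime, len(filename))
    return header + filename

# a clean file whose mtime falls in the same second as the dirstate write
record = pack_one_v1_record(b'n', 0o100644, 12, 1700000000, b'a.txt', now_s=1700000000)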
@@ -1,286 +1,288 b''
1 1 use cpython::exc;
2 2 use cpython::ObjectProtocol;
3 3 use cpython::PyBytes;
4 4 use cpython::PyErr;
5 5 use cpython::PyNone;
6 6 use cpython::PyObject;
7 7 use cpython::PyResult;
8 8 use cpython::Python;
9 9 use cpython::PythonObject;
10 10 use hg::dirstate::DirstateEntry;
11 11 use hg::dirstate::EntryState;
12 12 use hg::dirstate::TruncatedTimestamp;
13 13 use std::cell::Cell;
14 14 use std::convert::TryFrom;
15 15
16 16 py_class!(pub class DirstateItem |py| {
17 17 data entry: Cell<DirstateEntry>;
18 18
19 19 def __new__(
20 20 _cls,
21 21 wc_tracked: bool = false,
22 22 p1_tracked: bool = false,
23 23 p2_info: bool = false,
24 24 has_meaningful_data: bool = true,
25 25 has_meaningful_mtime: bool = true,
26 parentfiledata: Option<(u32, u32, (u32, u32))> = None,
26 parentfiledata: Option<(u32, u32, Option<(u32, u32)>)> = None,
27 27 fallback_exec: Option<bool> = None,
28 28 fallback_symlink: Option<bool> = None,
29 29
30 30 ) -> PyResult<DirstateItem> {
31 31 let mut mode_size_opt = None;
32 32 let mut mtime_opt = None;
33 33 if let Some((mode, size, mtime)) = parentfiledata {
34 34 if has_meaningful_data {
35 35 mode_size_opt = Some((mode, size))
36 36 }
37 37 if has_meaningful_mtime {
38 mtime_opt = Some(timestamp(py, mtime)?)
38 if let Some(m) = mtime {
39 mtime_opt = Some(timestamp(py, m)?);
40 }
39 41 }
40 42 }
41 43 let entry = DirstateEntry::from_v2_data(
42 44 wc_tracked,
43 45 p1_tracked,
44 46 p2_info,
45 47 mode_size_opt,
46 48 mtime_opt,
47 49 fallback_exec,
48 50 fallback_symlink,
49 51 );
50 52 DirstateItem::create_instance(py, Cell::new(entry))
51 53 }
52 54
53 55 @property
54 56 def state(&self) -> PyResult<PyBytes> {
55 57 let state_byte: u8 = self.entry(py).get().state().into();
56 58 Ok(PyBytes::new(py, &[state_byte]))
57 59 }
58 60
59 61 @property
60 62 def mode(&self) -> PyResult<i32> {
61 63 Ok(self.entry(py).get().mode())
62 64 }
63 65
64 66 @property
65 67 def size(&self) -> PyResult<i32> {
66 68 Ok(self.entry(py).get().size())
67 69 }
68 70
69 71 @property
70 72 def mtime(&self) -> PyResult<i32> {
71 73 Ok(self.entry(py).get().mtime())
72 74 }
73 75
74 76 @property
75 77 def has_fallback_exec(&self) -> PyResult<bool> {
76 78 match self.entry(py).get().get_fallback_exec() {
77 79 Some(_) => Ok(true),
78 80 None => Ok(false),
79 81 }
80 82 }
81 83
82 84 @property
83 85 def fallback_exec(&self) -> PyResult<Option<bool>> {
84 86 match self.entry(py).get().get_fallback_exec() {
85 87 Some(exec) => Ok(Some(exec)),
86 88 None => Ok(None),
87 89 }
88 90 }
89 91
90 92 @fallback_exec.setter
91 93 def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> {
92 94 match value {
93 95 None => {self.entry(py).get().set_fallback_exec(None);},
94 96 Some(value) => {
95 97 if value.is_none(py) {
96 98 self.entry(py).get().set_fallback_exec(None);
97 99 } else {
98 100 self.entry(py).get().set_fallback_exec(
99 101 Some(value.is_true(py)?)
100 102 );
101 103 }},
102 104 }
103 105 Ok(())
104 106 }
105 107
106 108 @property
107 109 def has_fallback_symlink(&self) -> PyResult<bool> {
108 110 match self.entry(py).get().get_fallback_symlink() {
109 111 Some(_) => Ok(true),
110 112 None => Ok(false),
111 113 }
112 114 }
113 115
114 116 @property
115 117 def fallback_symlink(&self) -> PyResult<Option<bool>> {
116 118 match self.entry(py).get().get_fallback_symlink() {
117 119 Some(symlink) => Ok(Some(symlink)),
118 120 None => Ok(None),
119 121 }
120 122 }
121 123
122 124 @fallback_symlink.setter
123 125 def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> {
124 126 match value {
125 127 None => {self.entry(py).get().set_fallback_symlink(None);},
126 128 Some(value) => {
127 129 if value.is_none(py) {
128 130 self.entry(py).get().set_fallback_symlink(None);
129 131 } else {
130 132 self.entry(py).get().set_fallback_symlink(
131 133 Some(value.is_true(py)?)
132 134 );
133 135 }},
134 136 }
135 137 Ok(())
136 138 }
137 139
138 140 @property
139 141 def tracked(&self) -> PyResult<bool> {
140 142 Ok(self.entry(py).get().tracked())
141 143 }
142 144
143 145 @property
144 146 def p1_tracked(&self) -> PyResult<bool> {
145 147 Ok(self.entry(py).get().p1_tracked())
146 148 }
147 149
148 150 @property
149 151 def added(&self) -> PyResult<bool> {
150 152 Ok(self.entry(py).get().added())
151 153 }
152 154
153 155
154 156 @property
155 157 def p2_info(&self) -> PyResult<bool> {
156 158 Ok(self.entry(py).get().p2_info())
157 159 }
158 160
159 161 @property
160 162 def removed(&self) -> PyResult<bool> {
161 163 Ok(self.entry(py).get().removed())
162 164 }
163 165
164 166 @property
165 167 def maybe_clean(&self) -> PyResult<bool> {
166 168 Ok(self.entry(py).get().maybe_clean())
167 169 }
168 170
169 171 @property
170 172 def any_tracked(&self) -> PyResult<bool> {
171 173 Ok(self.entry(py).get().any_tracked())
172 174 }
173 175
174 176 def v1_state(&self) -> PyResult<PyBytes> {
175 177 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
176 178 let state_byte: u8 = state.into();
177 179 Ok(PyBytes::new(py, &[state_byte]))
178 180 }
179 181
180 182 def v1_mode(&self) -> PyResult<i32> {
181 183 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
182 184 Ok(mode)
183 185 }
184 186
185 187 def v1_size(&self) -> PyResult<i32> {
186 188 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
187 189 Ok(size)
188 190 }
189 191
190 192 def v1_mtime(&self) -> PyResult<i32> {
191 193 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
192 194 Ok(mtime)
193 195 }
194 196
195 197 def need_delay(&self, now: (u32, u32)) -> PyResult<bool> {
196 198 let now = timestamp(py, now)?;
197 199 Ok(self.entry(py).get().need_delay(now))
198 200 }
199 201
200 202 def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> {
201 203 if let Some(mtime) = self.entry(py).get().truncated_mtime() {
202 204 Ok(mtime.likely_equal(timestamp(py, other)?))
203 205 } else {
204 206 Ok(false)
205 207 }
206 208 }
207 209
208 210 @classmethod
209 211 def from_v1_data(
210 212 _cls,
211 213 state: PyBytes,
212 214 mode: i32,
213 215 size: i32,
214 216 mtime: i32,
215 217 ) -> PyResult<Self> {
216 218 let state = <[u8; 1]>::try_from(state.data(py))
217 219 .ok()
218 220 .and_then(|state| EntryState::try_from(state[0]).ok())
219 221 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
220 222 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
221 223 DirstateItem::create_instance(py, Cell::new(entry))
222 224 }
223 225
224 226 def drop_merge_data(&self) -> PyResult<PyNone> {
225 227 self.update(py, |entry| entry.drop_merge_data());
226 228 Ok(PyNone)
227 229 }
228 230
229 231 def set_clean(
230 232 &self,
231 233 mode: u32,
232 234 size: u32,
233 235 mtime: (u32, u32),
234 236 ) -> PyResult<PyNone> {
235 237 let mtime = timestamp(py, mtime)?;
236 238 self.update(py, |entry| entry.set_clean(mode, size, mtime));
237 239 Ok(PyNone)
238 240 }
239 241
240 242 def set_possibly_dirty(&self) -> PyResult<PyNone> {
241 243 self.update(py, |entry| entry.set_possibly_dirty());
242 244 Ok(PyNone)
243 245 }
244 246
245 247 def set_tracked(&self) -> PyResult<PyNone> {
246 248 self.update(py, |entry| entry.set_tracked());
247 249 Ok(PyNone)
248 250 }
249 251
250 252 def set_untracked(&self) -> PyResult<PyNone> {
251 253 self.update(py, |entry| entry.set_untracked());
252 254 Ok(PyNone)
253 255 }
254 256 });
255 257
256 258 impl DirstateItem {
257 259 pub fn new_as_pyobject(
258 260 py: Python<'_>,
259 261 entry: DirstateEntry,
260 262 ) -> PyResult<PyObject> {
261 263 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
262 264 }
263 265
264 266 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
265 267 self.entry(py).get()
266 268 }
267 269
268 270 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
269 271 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
270 272 let mut entry = self.entry(py).get();
271 273 f(&mut entry);
272 274 self.entry(py).set(entry)
273 275 }
274 276 }
275 277
276 278 pub(crate) fn timestamp(
277 279 py: Python<'_>,
278 280 (s, ns): (u32, u32),
279 281 ) -> PyResult<TruncatedTimestamp> {
280 282 TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| {
281 283 PyErr::new::<exc::ValueError, _>(
282 284 py,
283 285 "expected mtime truncated to 31 bits",
284 286 )
285 287 })
286 288 }
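The signature change to `parentfiledata` above (its third element is now `Option<(u32, u32)>`) is the point of this changeset: callers can record a valid mode and size while leaving the mtime unset. A hedged sketch of what that looks like from the Python side; the import path is hypothetical and depends on which implementation (C, Rust, or pure Python) is loaded, but the keyword names match `__new__` above.

# hypothetical import path, for illustration only
from mercurial.cext.parsers import DirstateItem

# mode and size are known, but no reliable mtime is available, so the
# third element of parentfiledata is left as None instead of (s, ns)
item = DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(0o100644, 12, None),
)
assert item.tracked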