##// END OF EJS Templates
dirstate-item: drop the deprecated __getitem__ variante...
marmoute -
r48737:ba9491be default
parent child Browse files
Show More
@@ -1,1194 +1,1160 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define PY_SSIZE_T_CLEAN
11 11 #include <Python.h>
12 12 #include <ctype.h>
13 13 #include <stddef.h>
14 14 #include <string.h>
15 15
16 16 #include "bitmanipulation.h"
17 17 #include "charencode.h"
18 18 #include "util.h"
19 19
20 20 #ifdef IS_PY3K
 21 21 /* The mapping of Python types is meant to be temporary to get Python
 22 22  * 3 to compile. We should remove this once Python 3 is fully
 23 23  * supported and proper types are used in the extensions themselves. */
24 24 #define PyInt_Check PyLong_Check
25 25 #define PyInt_FromLong PyLong_FromLong
26 26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 27 #define PyInt_AsLong PyLong_AsLong
28 28 #endif
29 29
30 30 static const char *const versionerrortext = "Python minor version mismatch";
31 31
32 32 static const int dirstate_v1_from_p2 = -2;
33 33 static const int dirstate_v1_nonnormal = -1;
34 34 static const int ambiguous_time = -1;
35 35
36 36 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
37 37 {
38 38 Py_ssize_t expected_size;
39 39
40 40 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
41 41 return NULL;
42 42 }
43 43
44 44 return _dict_new_presized(expected_size);
45 45 }
46 46
47 47 static inline dirstateItemObject *make_dirstate_item(char state, int mode,
48 48 int size, int mtime)
49 49 {
50 50 dirstateItemObject *t =
51 51 PyObject_New(dirstateItemObject, &dirstateItemType);
52 52 if (!t) {
53 53 return NULL;
54 54 }
55 55 t->state = state;
56 56 t->mode = mode;
57 57 t->size = size;
58 58 t->mtime = mtime;
59 59 return t;
60 60 }
61 61
62 62 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
63 63 PyObject *kwds)
64 64 {
65 65 /* We do all the initialization here and not a tp_init function because
66 66 * dirstate_item is immutable. */
67 67 dirstateItemObject *t;
68 68 int wc_tracked;
69 69 int p1_tracked;
70 70 int p2_tracked;
71 71 int merged;
72 72 int clean_p1;
73 73 int clean_p2;
74 74 int possibly_dirty;
75 75 PyObject *parentfiledata;
76 76 static char *keywords_name[] = {
77 77 "wc_tracked", "p1_tracked", "p2_tracked",
78 78 "merged", "clean_p1", "clean_p2",
79 79 "possibly_dirty", "parentfiledata", NULL,
80 80 };
81 81 wc_tracked = 0;
82 82 p1_tracked = 0;
83 83 p2_tracked = 0;
84 84 merged = 0;
85 85 clean_p1 = 0;
86 86 clean_p2 = 0;
87 87 possibly_dirty = 0;
88 88 parentfiledata = Py_None;
89 89 if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiiiiiiO", keywords_name,
90 90 &wc_tracked, &p1_tracked, &p2_tracked,
91 91 &merged, &clean_p1, &clean_p2,
92 92 &possibly_dirty, &parentfiledata
93 93
94 94 )) {
95 95 return NULL;
96 96 }
97 97 if (merged && (clean_p1 || clean_p2)) {
98 98 PyErr_SetString(PyExc_RuntimeError,
99 99 "`merged` argument incompatible with "
100 100 "`clean_p1`/`clean_p2`");
101 101 return NULL;
102 102 }
103 103 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
104 104 if (!t) {
105 105 return NULL;
106 106 }
107 107 t->state = 'r';
108 108 t->mode = 0;
109 109 t->size = dirstate_v1_nonnormal;
110 110 t->mtime = ambiguous_time;
111 111 if (!(p1_tracked || p2_tracked || wc_tracked)) {
112 112 /* Nothing special to do, file is untracked */
113 113 } else if (merged) {
114 114 t->state = 'm';
115 115 t->size = dirstate_v1_from_p2;
116 116 t->mtime = ambiguous_time;
117 117 } else if (!(p1_tracked || p2_tracked) && wc_tracked) {
118 118 t->state = 'a';
119 119 t->size = dirstate_v1_nonnormal;
120 120 t->mtime = ambiguous_time;
121 121 } else if ((p1_tracked || p2_tracked) && !wc_tracked) {
122 122 t->state = 'r';
123 123 t->size = 0;
124 124 t->mtime = 0;
125 125 } else if (clean_p2 && wc_tracked) {
126 126 t->state = 'n';
127 127 t->size = dirstate_v1_from_p2;
128 128 t->mtime = ambiguous_time;
129 129 } else if (!p1_tracked && p2_tracked && wc_tracked) {
130 130 t->state = 'n';
131 131 t->size = dirstate_v1_from_p2;
132 132 t->mtime = ambiguous_time;
133 133 } else if (possibly_dirty) {
134 134 t->state = 'n';
135 135 t->size = dirstate_v1_nonnormal;
136 136 t->mtime = ambiguous_time;
137 137 } else if (wc_tracked) {
138 138 /* this is a "normal" file */
139 139 if (parentfiledata == Py_None) {
140 140 PyErr_SetString(
141 141 PyExc_RuntimeError,
142 142 "failed to pass parentfiledata for a normal file");
143 143 return NULL;
144 144 }
145 145 if (!PyTuple_CheckExact(parentfiledata)) {
146 146 PyErr_SetString(
147 147 PyExc_TypeError,
148 148 "parentfiledata should be a Tuple or None");
149 149 return NULL;
150 150 }
151 151 t->state = 'n';
152 152 t->mode =
153 153 (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 0));
154 154 t->size =
155 155 (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 1));
156 156 t->mtime =
157 157 (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 2));
158 158 } else {
159 159 PyErr_SetString(PyExc_RuntimeError, "unreachable");
160 160 return NULL;
161 161 }
162 162 return (PyObject *)t;
163 163 }
164 164
static void dirstate_item_dealloc(PyObject *o)
{
	/* The item owns no Python references; just release its memory. */
	PyObject_Del(o);
}
169 169
170 static Py_ssize_t dirstate_item_length(PyObject *o)
171 {
172 return 4;
173 }
174
175 static PyObject *dirstate_item_item(PyObject *o, Py_ssize_t i)
176 {
177 dirstateItemObject *t = (dirstateItemObject *)o;
178 switch (i) {
179 case 0:
180 return PyBytes_FromStringAndSize(&t->state, 1);
181 case 1:
182 return PyInt_FromLong(t->mode);
183 case 2:
184 return PyInt_FromLong(t->size);
185 case 3:
186 return PyInt_FromLong(t->mtime);
187 default:
188 PyErr_SetString(PyExc_IndexError, "index out of range");
189 return NULL;
190 }
191 }
192
193 static PySequenceMethods dirstate_item_sq = {
194 dirstate_item_length, /* sq_length */
195 0, /* sq_concat */
196 0, /* sq_repeat */
197 dirstate_item_item, /* sq_item */
198 0, /* sq_ass_item */
199 0, /* sq_contains */
200 0, /* sq_inplace_concat */
201 0 /* sq_inplace_repeat */
202 };
203
/* Accessors returning the raw fields in their v1 serialization form. */
static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
{
	/* one-byte state code ('n', 'a', 'r' or 'm') as a bytes object */
	return PyBytes_FromStringAndSize(&self->state, 1);
};

static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
{
	return PyInt_FromLong(self->mode);
};

static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
{
	return PyInt_FromLong(self->size);
};

static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
{
	return PyInt_FromLong(self->mtime);
};
223 189
224 190 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
225 191 PyObject *value)
226 192 {
227 193 long now;
228 194 if (!pylong_to_long(value, &now)) {
229 195 return NULL;
230 196 }
231 197 if (self->state == 'n' && self->mtime == now) {
232 198 Py_RETURN_TRUE;
233 199 } else {
234 200 Py_RETURN_FALSE;
235 201 }
236 202 };
237 203
238 204 /* This will never change since it's bound to V1, unlike `make_dirstate_item`
239 205 */
240 206 static inline dirstateItemObject *
241 207 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
242 208 {
243 209 dirstateItemObject *t =
244 210 PyObject_New(dirstateItemObject, &dirstateItemType);
245 211 if (!t) {
246 212 return NULL;
247 213 }
248 214 t->state = state;
249 215 t->mode = mode;
250 216 t->size = size;
251 217 t->mtime = mtime;
252 218 return t;
253 219 }
254 220
255 221 /* This will never change since it's bound to V1, unlike `dirstate_item_new` */
256 222 static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
257 223 PyObject *args)
258 224 {
259 225 /* We do all the initialization here and not a tp_init function because
260 226 * dirstate_item is immutable. */
261 227 dirstateItemObject *t;
262 228 char state;
263 229 int size, mode, mtime;
264 230 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
265 231 return NULL;
266 232 }
267 233
268 234 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
269 235 if (!t) {
270 236 return NULL;
271 237 }
272 238 t->state = state;
273 239 t->mode = mode;
274 240 t->size = size;
275 241 t->mtime = mtime;
276 242
277 243 return (PyObject *)t;
278 244 };
279 245
/* constructor to help legacy API to build a new "added" item

   Should eventually be removed */
static PyObject *dirstate_item_new_added(PyTypeObject *subtype)
{
	dirstateItemObject *t;
	t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
	if (!t) {
		return NULL;
	}
	/* 'a' == added; size and mtime carry the "unknown" markers */
	t->state = 'a';
	t->mode = 0;
	t->size = dirstate_v1_nonnormal;
	t->mtime = ambiguous_time;
	return (PyObject *)t;
};

/* constructor to help legacy API to build a new "merged" item

   Should eventually be removed */
static PyObject *dirstate_item_new_merged(PyTypeObject *subtype)
{
	dirstateItemObject *t;
	t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
	if (!t) {
		return NULL;
	}
	/* 'm' == merged; size flags the second-parent origin (-2) */
	t->state = 'm';
	t->mode = 0;
	t->size = dirstate_v1_from_p2;
	t->mtime = ambiguous_time;
	return (PyObject *)t;
};

/* constructor to help legacy API to build a new "from_p2" item

   Should eventually be removed */
static PyObject *dirstate_item_new_from_p2(PyTypeObject *subtype)
{
	/* We do all the initialization here and not a tp_init function because
	 * dirstate_item is immutable. */
	dirstateItemObject *t;
	t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
	if (!t) {
		return NULL;
	}
	/* normal state, but size records it came from the second parent */
	t->state = 'n';
	t->mode = 0;
	t->size = dirstate_v1_from_p2;
	t->mtime = ambiguous_time;
	return (PyObject *)t;
};

/* constructor to help legacy API to build a new "possibly" item

   Should eventually be removed */
static PyObject *dirstate_item_new_possibly_dirty(PyTypeObject *subtype)
{
	/* We do all the initialization here and not a tp_init function because
	 * dirstate_item is immutable. */
	dirstateItemObject *t;
	t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
	if (!t) {
		return NULL;
	}
	/* normal state with unknown size/mtime: forces a content check */
	t->state = 'n';
	t->mode = 0;
	t->size = dirstate_v1_nonnormal;
	t->mtime = ambiguous_time;
	return (PyObject *)t;
};
351 317
352 318 /* constructor to help legacy API to build a new "normal" item
353 319
354 320 Should eventually be removed */
355 321 static PyObject *dirstate_item_new_normal(PyTypeObject *subtype, PyObject *args)
356 322 {
357 323 /* We do all the initialization here and not a tp_init function because
358 324 * dirstate_item is immutable. */
359 325 dirstateItemObject *t;
360 326 int size, mode, mtime;
361 327 if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) {
362 328 return NULL;
363 329 }
364 330
365 331 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
366 332 if (!t) {
367 333 return NULL;
368 334 }
369 335 t->state = 'n';
370 336 t->mode = mode;
371 337 t->size = size;
372 338 t->mtime = mtime;
373 339 return (PyObject *)t;
374 340 };
375 341
/* This means the next status call will have to actually check its content
   to make sure it is correct. */
static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
{
	/* An ambiguous mtime defeats the mtime shortcut in status. */
	self->mtime = ambiguous_time;
	Py_RETURN_NONE;
}
383 349
static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
{
	/* Convert the item to a "removed" entry, preserving in `size`
	 * a marker of where the file was removed from. */
	if (self->state == 'm') {
		/* removal of a merged file: keep the nonnormal marker (-1)
		 * so `merged_removed` can detect it later */
		self->size = dirstate_v1_nonnormal;
	} else if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
		/* keep the from-p2 marker (-2); the assignment is a no-op
		 * but makes the intent explicit (see `from_p2_removed`) */
		self->size = dirstate_v1_from_p2;
	} else {
		self->size = 0;
	}
	self->state = 'r';
	self->mode = 0;
	self->mtime = 0;
	Py_RETURN_NONE;
}
398 364
/* Method table for DirstateItem: v1 serialization accessors, legacy
 * class-method constructors and the few mutators the dirstate needs. */
static PyMethodDef dirstate_item_methods[] = {
    {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
     "return a \"state\" suitable for v1 serialization"},
    {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
     "return a \"mode\" suitable for v1 serialization"},
    {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
     "return a \"size\" suitable for v1 serialization"},
    {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
     "return a \"mtime\" suitable for v1 serialization"},
    {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
     "True if the stored mtime would be ambiguous with the current time"},
    {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
     METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
    {"new_added", (PyCFunction)dirstate_item_new_added,
     METH_NOARGS | METH_CLASS,
     "constructor to help legacy API to build a new \"added\" item"},
    {"new_merged", (PyCFunction)dirstate_item_new_merged,
     METH_NOARGS | METH_CLASS,
     "constructor to help legacy API to build a new \"merged\" item"},
    {"new_from_p2", (PyCFunction)dirstate_item_new_from_p2,
     METH_NOARGS | METH_CLASS,
     "constructor to help legacy API to build a new \"from_p2\" item"},
    {"new_possibly_dirty", (PyCFunction)dirstate_item_new_possibly_dirty,
     METH_NOARGS | METH_CLASS,
     "constructor to help legacy API to build a new \"possibly_dirty\" item"},
    {"new_normal", (PyCFunction)dirstate_item_new_normal,
     METH_VARARGS | METH_CLASS,
     "constructor to help legacy API to build a new \"normal\" item"},
    {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
     METH_NOARGS, "mark a file as \"possibly dirty\""},
    {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
     "mark a file as \"untracked\""},
    {NULL} /* Sentinel */
};
433 399
/* getset accessors exposing the raw fields as read-only attributes */
static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
{
	return PyInt_FromLong(self->mode);
};

static PyObject *dirstate_item_get_size(dirstateItemObject *self)
{
	return PyInt_FromLong(self->size);
};

static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
{
	return PyInt_FromLong(self->mtime);
};

static PyObject *dirstate_item_get_state(dirstateItemObject *self)
{
	/* one-byte state code ('n', 'a', 'r' or 'm') as a bytes object */
	return PyBytes_FromStringAndSize(&self->state, 1);
};
453 419
static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
{
	/* tracked == present in the working copy ('a', 'm' or 'n') */
	if (self->state == 'a' || self->state == 'm' || self->state == 'n') {
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
};

static PyObject *dirstate_item_get_added(dirstateItemObject *self)
{
	if (self->state == 'a') {
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
};

static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
{
	if (self->state == 'm') {
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
};

static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self)
{
	/* removed entry whose size marker (-1) says it was merged before */
	if (self->state == 'r' && self->size == dirstate_v1_nonnormal) {
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
};

static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
{
	/* normal entry whose size marker (-2) says it came from p2 */
	if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
};

static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self)
{
	/* removed entry whose size marker (-2) says it came from p2 */
	if (self->state == 'r' && self->size == dirstate_v1_from_p2) {
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
};

static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
{
	if (self->state == 'r') {
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
};
516 482
static PyObject *dm_nonnormal(dirstateItemObject *self)
{
	/* "nonnormal" for the dirstate map: anything that is not a clean
	 * 'n' entry, or an 'n' entry whose mtime is ambiguous */
	if (self->state != 'n' || self->mtime == ambiguous_time) {
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
};
static PyObject *dm_otherparent(dirstateItemObject *self)
{
	/* entry whose size marker (-2) flags the second parent */
	if (self->size == dirstate_v1_from_p2) {
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
};
533 499
/* Attribute table: raw fields plus derived state predicates. */
static PyGetSetDef dirstate_item_getset[] = {
    {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
    {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
    {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
    {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
    {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
    {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
    {"merged_removed", (getter)dirstate_item_get_merged_removed, NULL,
     "merged_removed", NULL},
    {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
    {"from_p2_removed", (getter)dirstate_item_get_from_p2_removed, NULL,
     "from_p2_removed", NULL},
    {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
    {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
    {"dm_nonnormal", (getter)dm_nonnormal, NULL, "dm_nonnormal", NULL},
    {"dm_otherparent", (getter)dm_otherparent, NULL, "dm_otherparent", NULL},
    {NULL} /* Sentinel */
};
552 518
553 519 PyTypeObject dirstateItemType = {
554 520 PyVarObject_HEAD_INIT(NULL, 0) /* header */
555 521 "dirstate_tuple", /* tp_name */
556 522 sizeof(dirstateItemObject), /* tp_basicsize */
557 523 0, /* tp_itemsize */
558 524 (destructor)dirstate_item_dealloc, /* tp_dealloc */
559 525 0, /* tp_print */
560 526 0, /* tp_getattr */
561 527 0, /* tp_setattr */
562 528 0, /* tp_compare */
563 529 0, /* tp_repr */
564 530 0, /* tp_as_number */
565 &dirstate_item_sq, /* tp_as_sequence */
531 0, /* tp_as_sequence */
566 532 0, /* tp_as_mapping */
567 533 0, /* tp_hash */
568 534 0, /* tp_call */
569 535 0, /* tp_str */
570 536 0, /* tp_getattro */
571 537 0, /* tp_setattro */
572 538 0, /* tp_as_buffer */
573 539 Py_TPFLAGS_DEFAULT, /* tp_flags */
574 540 "dirstate tuple", /* tp_doc */
575 541 0, /* tp_traverse */
576 542 0, /* tp_clear */
577 543 0, /* tp_richcompare */
578 544 0, /* tp_weaklistoffset */
579 545 0, /* tp_iter */
580 546 0, /* tp_iternext */
581 547 dirstate_item_methods, /* tp_methods */
582 548 0, /* tp_members */
583 549 dirstate_item_getset, /* tp_getset */
584 550 0, /* tp_base */
585 551 0, /* tp_dict */
586 552 0, /* tp_descr_get */
587 553 0, /* tp_descr_set */
588 554 0, /* tp_dictoffset */
589 555 0, /* tp_init */
590 556 0, /* tp_alloc */
591 557 dirstate_item_new, /* tp_new */
592 558 };
593 559
594 560 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
595 561 {
596 562 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
597 563 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
598 564 char state, *cur, *str, *cpos;
599 565 int mode, size, mtime;
600 566 unsigned int flen, pos = 40;
601 567 Py_ssize_t len = 40;
602 568 Py_ssize_t readlen;
603 569
604 570 if (!PyArg_ParseTuple(
605 571 args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
606 572 &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
607 573 goto quit;
608 574 }
609 575
610 576 len = readlen;
611 577
612 578 /* read parents */
613 579 if (len < 40) {
614 580 PyErr_SetString(PyExc_ValueError,
615 581 "too little data for parents");
616 582 goto quit;
617 583 }
618 584
619 585 parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
620 586 str + 20, (Py_ssize_t)20);
621 587 if (!parents) {
622 588 goto quit;
623 589 }
624 590
625 591 /* read filenames */
626 592 while (pos >= 40 && pos < len) {
627 593 if (pos + 17 > len) {
628 594 PyErr_SetString(PyExc_ValueError,
629 595 "overflow in dirstate");
630 596 goto quit;
631 597 }
632 598 cur = str + pos;
633 599 /* unpack header */
634 600 state = *cur;
635 601 mode = getbe32(cur + 1);
636 602 size = getbe32(cur + 5);
637 603 mtime = getbe32(cur + 9);
638 604 flen = getbe32(cur + 13);
639 605 pos += 17;
640 606 cur += 17;
641 607 if (flen > len - pos) {
642 608 PyErr_SetString(PyExc_ValueError,
643 609 "overflow in dirstate");
644 610 goto quit;
645 611 }
646 612
647 613 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
648 614 size, mtime);
649 615 cpos = memchr(cur, 0, flen);
650 616 if (cpos) {
651 617 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
652 618 cname = PyBytes_FromStringAndSize(
653 619 cpos + 1, flen - (cpos - cur) - 1);
654 620 if (!fname || !cname ||
655 621 PyDict_SetItem(cmap, fname, cname) == -1 ||
656 622 PyDict_SetItem(dmap, fname, entry) == -1) {
657 623 goto quit;
658 624 }
659 625 Py_DECREF(cname);
660 626 } else {
661 627 fname = PyBytes_FromStringAndSize(cur, flen);
662 628 if (!fname ||
663 629 PyDict_SetItem(dmap, fname, entry) == -1) {
664 630 goto quit;
665 631 }
666 632 }
667 633 Py_DECREF(fname);
668 634 Py_DECREF(entry);
669 635 fname = cname = entry = NULL;
670 636 pos += flen;
671 637 }
672 638
673 639 ret = parents;
674 640 Py_INCREF(ret);
675 641 quit:
676 642 Py_XDECREF(fname);
677 643 Py_XDECREF(cname);
678 644 Py_XDECREF(entry);
679 645 Py_XDECREF(parents);
680 646 return ret;
681 647 }
682 648
/*
 * Build a set of non-normal and other parent entries from the dirstate dmap
 */
static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args)
{
	/* Returns a (nonnormal, otherparent) pair of filename sets:
	 * nonnormal collects every entry that is not clean-'n' (or whose
	 * mtime is the -1 "unknown" marker); otherparent collects 'n'
	 * entries whose size is -2, i.e. coming from the second parent. */
	PyObject *dmap, *fname, *v;
	PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
	Py_ssize_t pos;

	if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
	                      &dmap)) {
		goto bail;
	}

	nonnset = PySet_New(NULL);
	if (nonnset == NULL) {
		goto bail;
	}

	otherpset = PySet_New(NULL);
	if (otherpset == NULL) {
		goto bail;
	}

	pos = 0;
	while (PyDict_Next(dmap, &pos, &fname, &v)) {
		dirstateItemObject *t;
		if (!dirstate_tuple_check(v)) {
			PyErr_SetString(PyExc_TypeError,
			                "expected a dirstate tuple");
			goto bail;
		}
		t = (dirstateItemObject *)v;

		if (t->state == 'n' && t->size == -2) {
			/* -2 == dirstate_v1_from_p2: from the other parent */
			if (PySet_Add(otherpset, fname) == -1) {
				goto bail;
			}
		}

		if (t->state == 'n' && t->mtime != -1) {
			/* clean normal entry with a known mtime: skip */
			continue;
		}
		if (PySet_Add(nonnset, fname) == -1) {
			goto bail;
		}
	}

	result = Py_BuildValue("(OO)", nonnset, otherpset);
	if (result == NULL) {
		goto bail;
	}
	Py_DECREF(nonnset);
	Py_DECREF(otherpset);
	return result;
bail:
	Py_XDECREF(nonnset);
	Py_XDECREF(otherpset);
	Py_XDECREF(result);
	return NULL;
}
744 710
/*
 * Efficiently pack a dirstate object into its on-disk format.
 */
static PyObject *pack_dirstate(PyObject *self, PyObject *args)
{
	/* Serialize (map, copymap, parents, now) into the v1 on-disk bytes:
	 * 40 bytes of parent hashes followed, per entry, by a 17-byte
	 * header and the filename (plus optional "\0copysource"). */
	PyObject *packobj = NULL;
	PyObject *map, *copymap, *pl, *mtime_unset = NULL;
	Py_ssize_t nbytes, pos, l;
	PyObject *k, *v = NULL, *pn;
	char *p, *s;
	int now;

	if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
	                      &PyDict_Type, &copymap, &PyTuple_Type, &pl,
	                      &now)) {
		return NULL;
	}

	if (PyTuple_Size(pl) != 2) {
		PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
		return NULL;
	}

	/* Figure out how much we need to allocate. */
	for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
		PyObject *c;
		if (!PyBytes_Check(k)) {
			PyErr_SetString(PyExc_TypeError, "expected string key");
			goto bail;
		}
		nbytes += PyBytes_GET_SIZE(k) + 17;
		c = PyDict_GetItem(copymap, k);
		if (c) {
			if (!PyBytes_Check(c)) {
				PyErr_SetString(PyExc_TypeError,
				                "expected string key");
				goto bail;
			}
			/* +1 for the NUL separating name from copy source */
			nbytes += PyBytes_GET_SIZE(c) + 1;
		}
	}

	packobj = PyBytes_FromStringAndSize(NULL, nbytes);
	if (packobj == NULL) {
		goto bail;
	}

	p = PyBytes_AS_STRING(packobj);

	/* write the two 20-byte parent hashes first */
	pn = PyTuple_GET_ITEM(pl, 0);
	if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
		PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
		goto bail;
	}
	memcpy(p, s, l);
	p += 20;
	pn = PyTuple_GET_ITEM(pl, 1);
	if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
		PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
		goto bail;
	}
	memcpy(p, s, l);
	p += 20;

	for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
		dirstateItemObject *tuple;
		char state;
		int mode, size, mtime;
		Py_ssize_t len, l;
		PyObject *o;
		char *t;

		if (!dirstate_tuple_check(v)) {
			PyErr_SetString(PyExc_TypeError,
			                "expected a dirstate tuple");
			goto bail;
		}
		tuple = (dirstateItemObject *)v;

		state = tuple->state;
		mode = tuple->mode;
		size = tuple->size;
		mtime = tuple->mtime;
		if (state == 'n' && mtime == now) {
			/* See pure/parsers.py:pack_dirstate for why we do
			 * this. */
			mtime = -1;
			mtime_unset = (PyObject *)make_dirstate_item(
			    state, mode, size, mtime);
			if (!mtime_unset) {
				goto bail;
			}
			/* also update the in-memory map so it matches disk */
			if (PyDict_SetItem(map, k, mtime_unset) == -1) {
				goto bail;
			}
			Py_DECREF(mtime_unset);
			mtime_unset = NULL;
		}
		*p++ = state;
		putbe32((uint32_t)mode, p);
		putbe32((uint32_t)size, p + 4);
		putbe32((uint32_t)mtime, p + 8);
		/* `t` remembers where the record length goes; it is only
		 * known after the optional copy source is appended */
		t = p + 12;
		p += 16;
		len = PyBytes_GET_SIZE(k);
		memcpy(p, PyBytes_AS_STRING(k), len);
		p += len;
		o = PyDict_GetItem(copymap, k);
		if (o) {
			*p++ = '\0';
			l = PyBytes_GET_SIZE(o);
			memcpy(p, PyBytes_AS_STRING(o), l);
			p += l;
			len += l + 1;
		}
		putbe32((uint32_t)len, t);
	}

	/* sanity check: we must have filled the buffer exactly */
	pos = p - PyBytes_AS_STRING(packobj);
	if (pos != nbytes) {
		PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
		             (long)pos, (long)nbytes);
		goto bail;
	}

	return packobj;
bail:
	Py_XDECREF(mtime_unset);
	Py_XDECREF(packobj);
	Py_XDECREF(v);
	return NULL;
}
877 843
878 844 #define BUMPED_FIX 1
879 845 #define USING_SHA_256 2
880 846 #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)
881 847
882 848 static PyObject *readshas(const char *source, unsigned char num,
883 849 Py_ssize_t hashwidth)
884 850 {
885 851 int i;
886 852 PyObject *list = PyTuple_New(num);
887 853 if (list == NULL) {
888 854 return NULL;
889 855 }
890 856 for (i = 0; i < num; i++) {
891 857 PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
892 858 if (hash == NULL) {
893 859 Py_DECREF(list);
894 860 return NULL;
895 861 }
896 862 PyTuple_SET_ITEM(list, i, hash);
897 863 source += hashwidth;
898 864 }
899 865 return list;
900 866 }
901 867
static PyObject *fm1readmarker(const char *databegin, const char *dataend,
                               uint32_t *msize)
{
	/* Decode one "format 1" obsolescence marker from [databegin,
	 * dataend). On success returns a
	 * (prec, succs, flags, metadata, (mtime, tz*60), parents) tuple and
	 * stores the total marker size in *msize so the caller can advance.
	 * Returns NULL with ValueError set on truncated input. */
	const char *data = databegin;
	const char *meta;

	double mtime;
	int16_t tz;
	uint16_t flags;
	unsigned char nsuccs, nparents, nmetadata;
	Py_ssize_t hashwidth = 20;

	PyObject *prec = NULL, *parents = NULL, *succs = NULL;
	PyObject *metadata = NULL, *ret = NULL;
	int i;

	if (data + FM1_HEADER_SIZE > dataend) {
		goto overflow;
	}

	/* fixed-size header: total size, mtime, timezone, flags */
	*msize = getbe32(data);
	data += 4;
	mtime = getbefloat64(data);
	data += 8;
	tz = getbeint16(data);
	data += 2;
	flags = getbeuint16(data);
	data += 2;

	if (flags & USING_SHA_256) {
		hashwidth = 32;
	}

	nsuccs = (unsigned char)(*data++);
	nparents = (unsigned char)(*data++);
	nmetadata = (unsigned char)(*data++);

	if (databegin + *msize > dataend) {
		goto overflow;
	}
	dataend = databegin + *msize; /* narrow down to marker size */

	if (data + hashwidth > dataend) {
		goto overflow;
	}
	prec = PyBytes_FromStringAndSize(data, hashwidth);
	data += hashwidth;
	if (prec == NULL) {
		goto bail;
	}

	if (data + nsuccs * hashwidth > dataend) {
		goto overflow;
	}
	succs = readshas(data, nsuccs, hashwidth);
	if (succs == NULL) {
		goto bail;
	}
	data += nsuccs * hashwidth;

	/* parents are optional: any count other than 1 or 2 means None */
	if (nparents == 1 || nparents == 2) {
		if (data + nparents * hashwidth > dataend) {
			goto overflow;
		}
		parents = readshas(data, nparents, hashwidth);
		if (parents == NULL) {
			goto bail;
		}
		data += nparents * hashwidth;
	} else {
		parents = Py_None;
		Py_INCREF(parents);
	}

	/* metadata: nmetadata (keylen, valuelen) byte pairs, followed by
	 * the concatenated key/value bytes themselves */
	if (data + 2 * nmetadata > dataend) {
		goto overflow;
	}
	meta = data + (2 * nmetadata);
	metadata = PyTuple_New(nmetadata);
	if (metadata == NULL) {
		goto bail;
	}
	for (i = 0; i < nmetadata; i++) {
		PyObject *tmp, *left = NULL, *right = NULL;
		Py_ssize_t leftsize = (unsigned char)(*data++);
		Py_ssize_t rightsize = (unsigned char)(*data++);
		if (meta + leftsize + rightsize > dataend) {
			goto overflow;
		}
		left = PyBytes_FromStringAndSize(meta, leftsize);
		meta += leftsize;
		right = PyBytes_FromStringAndSize(meta, rightsize);
		meta += rightsize;
		tmp = PyTuple_New(2);
		if (!left || !right || !tmp) {
			Py_XDECREF(left);
			Py_XDECREF(right);
			Py_XDECREF(tmp);
			goto bail;
		}
		PyTuple_SET_ITEM(tmp, 0, left);
		PyTuple_SET_ITEM(tmp, 1, right);
		PyTuple_SET_ITEM(metadata, i, tmp);
	}
	ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
	                    (int)tz * 60, parents);
	goto bail; /* return successfully */

overflow:
	PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
bail:
	Py_XDECREF(prec);
	Py_XDECREF(succs);
	Py_XDECREF(metadata);
	Py_XDECREF(parents);
	return ret;
}
1019 985
static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
{
	/* Parse every "format 1" obsolescence marker found between byte
	 * offsets [offset, stop) of the given buffer into a list. */
	const char *data, *dataend;
	Py_ssize_t datalen, offset, stop;
	PyObject *markers = NULL;

	if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
	                      &offset, &stop)) {
		return NULL;
	}
	if (offset < 0) {
		PyErr_SetString(PyExc_ValueError,
		                "invalid negative offset in fm1readmarkers");
		return NULL;
	}
	if (stop > datalen) {
		PyErr_SetString(
		    PyExc_ValueError,
		    "stop longer than data length in fm1readmarkers");
		return NULL;
	}
	dataend = data + datalen;
	data += offset;
	markers = PyList_New(0);
	if (!markers) {
		return NULL;
	}
	while (offset < stop) {
		uint32_t msize;
		int error;
		PyObject *record = fm1readmarker(data, dataend, &msize);
		if (!record) {
			goto bail;
		}
		error = PyList_Append(markers, record);
		Py_DECREF(record);
		if (error) {
			goto bail;
		}
		/* fm1readmarker reported this marker's size: skip past it */
		data += msize;
		offset += msize;
	}
	return markers;
bail:
	Py_DECREF(markers);
	return NULL;
}
1067 1033
1068 1034 static char parsers_doc[] = "Efficient content parsing.";
1069 1035
1070 1036 PyObject *encodedir(PyObject *self, PyObject *args);
1071 1037 PyObject *pathencode(PyObject *self, PyObject *args);
1072 1038 PyObject *lowerencode(PyObject *self, PyObject *args);
1073 1039 PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
1074 1040
/* Method table for the `parsers` C extension module. */
static PyMethodDef methods[] = {
    {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
    {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
     "create a set containing non-normal and other parent entries of given "
     "dirstate\n"},
    {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
    {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
     "parse a revlog index\n"},
    {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
    {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
    {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
    {"dict_new_presized", dict_new_presized, METH_VARARGS,
     "construct a dict with an expected size\n"},
    {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
     "make file foldmap\n"},
    {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
     "escape a UTF-8 byte string to JSON (fast path)\n"},
    {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
    {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
    {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
    {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
     "parse v1 obsolete markers\n"},
    {NULL, NULL}};
1098 1064
1099 1065 void dirs_module_init(PyObject *mod);
1100 1066 void manifest_module_init(PyObject *mod);
1101 1067 void revlog_module_init(PyObject *mod);
1102 1068
1103 1069 static const int version = 20;
1104 1070
1105 1071 static void module_init(PyObject *mod)
1106 1072 {
1107 1073 PyObject *capsule = NULL;
1108 1074 PyModule_AddIntConstant(mod, "version", version);
1109 1075
1110 1076 /* This module constant has two purposes. First, it lets us unit test
1111 1077 * the ImportError raised without hard-coding any error text. This
1112 1078 * means we can change the text in the future without breaking tests,
1113 1079 * even across changesets without a recompile. Second, its presence
1114 1080 * can be used to determine whether the version-checking logic is
1115 1081 * present, which also helps in testing across changesets without a
1116 1082 * recompile. Note that this means the pure-Python version of parsers
1117 1083 * should not have this module constant. */
1118 1084 PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
1119 1085
1120 1086 dirs_module_init(mod);
1121 1087 manifest_module_init(mod);
1122 1088 revlog_module_init(mod);
1123 1089
1124 1090 capsule = PyCapsule_New(
1125 1091 make_dirstate_item,
1126 1092 "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL);
1127 1093 if (capsule != NULL)
1128 1094 PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule);
1129 1095
1130 1096 if (PyType_Ready(&dirstateItemType) < 0) {
1131 1097 return;
1132 1098 }
1133 1099 Py_INCREF(&dirstateItemType);
1134 1100 PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
1135 1101 }
1136 1102
/*
 * Verify that the running interpreter matches the Python these
 * extension modules were compiled against.
 *
 * Returns 0 when the major/minor versions match, -1 otherwise (with an
 * ImportError set, or whatever error the sys-module lookup raised).
 */
static int check_python_version(void)
{
	PyObject *sys = PyImport_ImportModule("sys"), *ver;
	long hexversion;
	if (!sys) {
		return -1;
	}
	ver = PyObject_GetAttrString(sys, "hexversion");
	Py_DECREF(sys);
	if (!ver) {
		return -1;
	}
	hexversion = PyInt_AsLong(ver);
	Py_DECREF(ver);
	/* sys.hexversion is a 32-bit number by default, so the -1 case
	 * should only occur in unusual circumstances (e.g. if sys.hexversion
	 * is manually set to an invalid value). */
	/* The >> 16 keeps only the major and minor version fields. */
	if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
		PyErr_Format(PyExc_ImportError,
		             "%s: The Mercurial extension "
		             "modules were compiled with Python " PY_VERSION
		             ", but "
		             "Mercurial is currently using Python with "
		             "sys.hexversion=%ld: "
		             "Python %s\n at: %s",
		             versionerrortext, hexversion, Py_GetVersion(),
		             Py_GetProgramFullPath());
		return -1;
	}
	return 0;
}
1168 1134
1169 1135 #ifdef IS_PY3K
1170 1136 static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
1171 1137 parsers_doc, -1, methods};
1172 1138
1173 1139 PyMODINIT_FUNC PyInit_parsers(void)
1174 1140 {
1175 1141 PyObject *mod;
1176 1142
1177 1143 if (check_python_version() == -1)
1178 1144 return NULL;
1179 1145 mod = PyModule_Create(&parsers_module);
1180 1146 module_init(mod);
1181 1147 return mod;
1182 1148 }
1183 1149 #else
1184 1150 PyMODINIT_FUNC initparsers(void)
1185 1151 {
1186 1152 PyObject *mod;
1187 1153
1188 1154 if (check_python_version() == -1) {
1189 1155 return;
1190 1156 }
1191 1157 mod = Py_InitModule3("parsers", methods, parsers_doc);
1192 1158 module_init(mod);
1193 1159 }
1194 1160 #endif
@@ -1,755 +1,735 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
28 28 stringio = pycompat.bytesio
29 29
30 30
31 31 _pack = struct.pack
32 32 _unpack = struct.unpack
33 33 _compress = zlib.compress
34 34 _decompress = zlib.decompress
35 35
36 36
# a special value used internally for `size` if the file comes from the other parent
38 38 FROM_P2 = -2
39 39
40 40 # a special value used internally for `size` if the file is modified/merged/added
41 41 NONNORMAL = -1
42 42
# a special value used internally for `time` if the time is ambiguous
44 44 AMBIGUOUS_TIME = -1
45 45
46 46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    # dirstate-v1 style fields; see the `state` property for the meaning
    # of the single-letter state codes.
    _state = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # defaults; overridden below according to the tracking flags
        self._state = None
        self._mode = 0
        self._size = NONNORMAL
        self._mtime = AMBIGUOUS_TIME
        if not (p1_tracked or p2_tracked or wc_tracked):
            pass # the object has no state to record
        elif merged:
            # merged file: size sentinel records the p2 origin
            self._state = b'm'
            self._size = FROM_P2
            self._mtime = AMBIGUOUS_TIME
        elif not (p1_tracked or p2_tracked) and wc_tracked:
            # only tracked in the working copy: added
            self._state = b'a'
            self._size = NONNORMAL
            self._mtime = AMBIGUOUS_TIME
        elif (p1_tracked or p2_tracked) and not wc_tracked:
            # tracked in a parent but not the working copy: removed
            self._state = b'r'
            self._size = 0
            self._mtime = 0
        elif clean_p2 and wc_tracked:
            self._state = b'n'
            self._size = FROM_P2
            self._mtime = AMBIGUOUS_TIME
        elif not p1_tracked and p2_tracked and wc_tracked:
            self._state = b'n'
            self._size = FROM_P2
            self._mtime = AMBIGUOUS_TIME
        elif possibly_dirty:
            self._state = b'n'
            self._size = NONNORMAL
            self._mtime = AMBIGUOUS_TIME
        elif wc_tracked:
            # this is a "normal" file
            if parentfiledata is None:
                msg = b'failed to pass parentfiledata for a normal file'
                raise error.ProgrammingError(msg)
            self._state = b'n'
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]
        else:
            assert False, 'unreachable'

    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._state = b'a'
        instance._mode = 0
        instance._size = NONNORMAL
        instance._mtime = AMBIGUOUS_TIME
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._state = b'm'
        instance._mode = 0
        instance._size = FROM_P2
        instance._mtime = AMBIGUOUS_TIME
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._state = b'n'
        instance._mode = 0
        instance._size = FROM_P2
        instance._mtime = AMBIGUOUS_TIME
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._state = b'n'
        instance._mode = 0
        instance._size = NONNORMAL
        instance._mtime = AMBIGUOUS_TIME
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        # a real size is required here: the sentinels have other meanings
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._state = b'n'
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        instance = cls()
        instance._state = state
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._mtime = AMBIGUOUS_TIME

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by command like `hg remove`.
        """
        # backup the previous state (useful for merge)
        size = 0
        if self.merged: # merge
            size = NONNORMAL
        elif self.from_p2:
            size = FROM_P2
        self._state = b'r'
        self._mode = 0
        self._size = size
        self._mtime = 0

    @property
    def mode(self):
        """the recorded file mode"""
        return self._mode

    @property
    def size(self):
        """the recorded file size (or one of the sentinel values)"""
        return self._size

    @property
    def mtime(self):
        """the recorded modification time (or AMBIGUOUS_TIME)"""
        return self._mtime

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self._state

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._state in b"nma"

    @property
    def added(self):
        """True if the file has been added"""
        return self._state == b'a'

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'm'

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'n' and self._size == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self._state == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == NONNORMAL

    @property
    def dm_nonnormal(self):
        """True if the entry is non-normal in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.state != b'n' or self.mtime == AMBIGUOUS_TIME

    @property
    def dm_otherparent(self):
        """True if the entry is `otherparent` in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self._size == FROM_P2

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        return self._state

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self._state == b'n' and self._mtime == now
359 339
360 340
def gettype(q):
    """Extract the type flags stored in the low 16 bits of `q`."""
    flags = q & 0xFFFF
    return int(flags)
363 343
364 344
class BaseIndexObject(object):
    """Shared implementation of the pure-Python revlog index.

    Entries live partly in an immutable packed buffer (``self._data``,
    holding ``self._lgt`` entries) and partly in a list of packed
    appended entries (``self._extra``).
    """

    # Can I be passed to an algorithm implemented in Rust?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        """size in bytes of one packed index entry"""
        return self.index_format.size

    @property
    def nodemap(self):
        # deprecated accessor kept for backward compatibility
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily build the node -> rev mapping (entry[7] is the node)
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # keep the cached nodemap (if built) in sync with a truncation
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        """drop the cached node -> rev mapping"""
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        """append a new (unpacked) entry tuple to the index"""
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 stores no sidedata: the sidedata offset (entry[8]) and
        # length (entry[9]) must be zero, and only fields 0-7 are packed
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # rev 0 is special-cased: only the type flags of its offset
            # field are kept (presumably because the offset bytes overlap
            # the on-disk version header — see entry_binary)
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # pad with the sidedata/compression fields that v1 does not store
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # the first entry omits the bytes taken by the version header
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
490 470
491 471
class IndexObject(BaseIndexObject):
    """Index backed by one contiguous buffer of fixed-size entries."""

    def __init__(self, data):
        # the buffer must contain a whole number of entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: byte offset is a plain multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only truncation from revision `i` to the end is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncating into the packed part also drops all appended entries
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]
518 498
519 499
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the in-memory state is consumed by this call; reset it
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
562 542
563 543
class InlinedIndexObject(BaseIndexObject):
    """Index for an inline revlog, where index entries and compressed
    revision data are interleaved in the same buffer."""

    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts the entries, second records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """Walk the interleaved buffer and return the entry count.

        With ``lgt=None`` only count entries; otherwise also record each
        entry's byte offset into ``self._offsets`` (pre-sized to ``lgt``).
        Raises ValueError if the buffer does not end on an entry boundary.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # the compressed-data length follows the 64-bit first field
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            # skip over this entry plus its inline data
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only truncation from revision `i` to the end is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets were recorded by _inline_scan
        return self._offsets[i]
605 585
606 586
def parse_index2(data, inline, revlogv2=False):
    """Build the appropriate index object for ``data``.

    Returns a ``(index, cache)`` pair: ``cache`` is None for non-inline
    revlogs and ``(0, data)`` for inline ones.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    index_class = IndexObject2 if revlogv2 else IndexObject
    return index_class(data), None
613 593
614 594
def parse_index_cl_v2(data):
    """Build an IndexChangelogV2; returns (index, None) like parse_index2."""
    return IndexChangelogV2(data), None
617 597
618 598
class IndexObject2(IndexObject):
    """Index for the v2 entry format, whose entries also carry sidedata
    location and compression information (fields 8-11)."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries in the packed (on-disk) part are immutable
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the last packed field holds both compression modes: data in
        # the low two bits, sidedata in the next two
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        # merge the two compression modes back into a single field
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
677 657
678 658
class IndexChangelogV2(IndexObject2):
    """Changelog flavor of the v2 index: the fields at positions 3 and 4
    are not stored, since for the changelog both always equal ``rev``."""

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        # NOTE(review): `r` is unused — looks like a leftover parameter;
        # confirm no caller passes it before removing.
        items = self.index_format.unpack(data)
        # reinsert the two implicit fields (always `rev`)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # the implicit fields must indeed equal `rev` before being dropped
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
697 677
698 678
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
702 682
703 683
def parse_dirstate(dmap, copymap, st):
    """Parse a v1 dirstate blob ``st``.

    Fills ``dmap`` with {filename: DirstateItem} and ``copymap`` with
    {copy-target: copy-source}; returns the two parent nodes stored in
    the first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
        # e[4] is the length of the filename that follows the entry
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            # copies are serialized as "target\0source" in the name field
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents
723 703
724 704
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize a dirstate to its v1 on-disk format.

    ``dmap`` maps filenames to DirstateItem, ``copymap`` maps copy
    targets to sources, ``pl`` is the pair of parent nodes, and ``now``
    is the current time (seconds) used to invalidate ambiguous mtimes.
    Returns the packed bytes.
    """
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            # copies are serialized as "target\0source" in the name field
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now