##// END OF EJS Templates
statfs: make getfstype() raise OSError...
Yuya Nishihara -
r31678:1ed57a7d default
parent child Browse files
Show More
@@ -1,1328 +1,1328
1 1 /*
2 2 osutil.c - native operating system services
3 3
4 4 Copyright 2007 Matt Mackall and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define _ATFILE_SOURCE
11 11 #include <Python.h>
12 12 #include <fcntl.h>
13 13 #include <stdio.h>
14 14 #include <stdlib.h>
15 15 #include <string.h>
16 16 #include <errno.h>
17 17
18 18 #ifdef _WIN32
19 19 #include <windows.h>
20 20 #include <io.h>
21 21 #else
22 22 #include <dirent.h>
23 23 #include <sys/socket.h>
24 24 #include <sys/stat.h>
25 25 #include <sys/types.h>
26 26 #include <unistd.h>
27 27 #ifdef HAVE_LINUX_STATFS
28 28 #include <linux/magic.h>
29 29 #include <sys/vfs.h>
30 30 #endif
31 31 #ifdef HAVE_BSD_STATFS
32 32 #include <sys/mount.h>
33 33 #include <sys/param.h>
34 34 #endif
35 35 #endif
36 36
37 37 #ifdef __APPLE__
38 38 #include <sys/attr.h>
39 39 #include <sys/vnode.h>
40 40 #endif
41 41
42 42 #include "util.h"
43 43
44 44 /* some platforms lack the PATH_MAX definition (eg. GNU/Hurd) */
45 45 #ifndef PATH_MAX
46 46 #define PATH_MAX 4096
47 47 #endif
48 48
#ifdef _WIN32
/*
stat struct compatible with hg expectations
Mercurial only uses st_mode, st_size and st_mtime
the rest is kept to minimize changes between implementations
*/
struct hg_stat {
	int st_dev;
	int st_mode;
	int st_nlink;
	__int64 st_size;
	int st_mtime;
	int st_ctime;
};
/* Python object wrapping one stat result; instantiated via listdir_stat_type */
struct listdir_stat {
	PyObject_HEAD
	struct hg_stat st;
};
#else
/* Python object wrapping one native struct stat result */
struct listdir_stat {
	PyObject_HEAD
	struct stat st;
};
#endif
73 73
#ifdef IS_PY3K
/* define a getter returning st.<name> as a Python int (Py3: PyLong) */
#define listdir_slot(name) \
	static PyObject *listdir_stat_##name(PyObject *self, void *x) \
	{ \
		return PyLong_FromLong(((struct listdir_stat *)self)->st.name); \
	}
#else
/* define a getter returning st.<name> as a Python int (Py2: PyInt) */
#define listdir_slot(name) \
	static PyObject *listdir_stat_##name(PyObject *self, void *x) \
	{ \
		return PyInt_FromLong(((struct listdir_stat *)self)->st.name); \
	}
#endif

listdir_slot(st_dev)
listdir_slot(st_mode)
listdir_slot(st_nlink)
#ifdef _WIN32
/* st_size is 64-bit on Windows; PyLong_FromLong would truncate it */
static PyObject *listdir_stat_st_size(PyObject *self, void *x)
{
	return PyLong_FromLongLong(
		(PY_LONG_LONG)((struct listdir_stat *)self)->st.st_size);
}
#else
listdir_slot(st_size)
#endif
listdir_slot(st_mtime)
listdir_slot(st_ctime)
102 102
/* read-only attribute table for osutil.stat objects; getters above */
static struct PyGetSetDef listdir_stat_getsets[] = {
	{"st_dev", listdir_stat_st_dev, 0, 0, 0},
	{"st_mode", listdir_stat_st_mode, 0, 0, 0},
	{"st_nlink", listdir_stat_st_nlink, 0, 0, 0},
	{"st_size", listdir_stat_st_size, 0, 0, 0},
	{"st_mtime", listdir_stat_st_mtime, 0, 0, 0},
	{"st_ctime", listdir_stat_st_ctime, 0, 0, 0},
	{0, 0, 0, 0, 0}
};
112 112
113 113 static PyObject *listdir_stat_new(PyTypeObject *t, PyObject *a, PyObject *k)
114 114 {
115 115 return t->tp_alloc(t, 0);
116 116 }
117 117
118 118 static void listdir_stat_dealloc(PyObject *o)
119 119 {
120 120 o->ob_type->tp_free(o);
121 121 }
122 122
/*
 * Python type whose instances hold one stat result; attributes are
 * exposed read-only through listdir_stat_getsets above.
 */
static PyTypeObject listdir_stat_type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	"osutil.stat", /*tp_name*/
	sizeof(struct listdir_stat), /*tp_basicsize*/
	0, /*tp_itemsize*/
	(destructor)listdir_stat_dealloc, /*tp_dealloc*/
	0, /*tp_print*/
	0, /*tp_getattr*/
	0, /*tp_setattr*/
	0, /*tp_compare*/
	0, /*tp_repr*/
	0, /*tp_as_number*/
	0, /*tp_as_sequence*/
	0, /*tp_as_mapping*/
	0, /*tp_hash */
	0, /*tp_call*/
	0, /*tp_str*/
	0, /*tp_getattro*/
	0, /*tp_setattro*/
	0, /*tp_as_buffer*/
	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
	"stat objects", /* tp_doc */
	0, /* tp_traverse */
	0, /* tp_clear */
	0, /* tp_richcompare */
	0, /* tp_weaklistoffset */
	0, /* tp_iter */
	0, /* tp_iternext */
	0, /* tp_methods */
	0, /* tp_members */
	listdir_stat_getsets, /* tp_getset */
	0, /* tp_base */
	0, /* tp_dict */
	0, /* tp_descr_get */
	0, /* tp_descr_set */
	0, /* tp_dictoffset */
	0, /* tp_init */
	0, /* tp_alloc */
	listdir_stat_new, /* tp_new */
};
163 163
164 164 #ifdef _WIN32
165 165
/* convert a Windows FILETIME (100ns ticks since 1601-01-01) into
   Unix-epoch seconds, truncated to int */
static int to_python_time(const FILETIME *tm)
{
	/* number of seconds between epoch and January 1 1601 */
	const __int64 a0 = (__int64)134774L * (__int64)24L * (__int64)3600L;
	/* conversion factor from 100ns to 1s */
	const __int64 a1 = 10000000;
	/* explicit (int) cast to suspend compiler warnings */
	return (int)((((__int64)tm->dwHighDateTime << 32)
	              + tm->dwLowDateTime) / a1 - a0);
}
176 176
/* build a (name, kind) tuple for one directory entry, or
   (name, kind, stat) when wantstat is true; returns NULL with a
   Python exception set on failure */
static PyObject *make_item(const WIN32_FIND_DATAA *fd, int wantstat)
{
	PyObject *py_st;
	struct hg_stat *stp;

	int kind = (fd->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
		? _S_IFDIR : _S_IFREG;

	if (!wantstat)
		return Py_BuildValue("si", fd->cFileName, kind);

	py_st = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
	if (!py_st)
		return NULL;

	stp = &((struct listdir_stat *)py_st)->st;
	/*
	use kind as st_mode
	rwx bits on Win32 are meaningless
	and Hg does not use them anyway
	*/
	stp->st_mode = kind;
	stp->st_mtime = to_python_time(&fd->ftLastWriteTime);
	stp->st_ctime = to_python_time(&fd->ftCreationTime);
	if (kind == _S_IFREG)
		stp->st_size = ((__int64)fd->nFileSizeHigh << 32)
			+ fd->nFileSizeLow;
	/* "N" steals the py_st reference on success */
	return Py_BuildValue("siN", fd->cFileName,
			     kind, py_st);
}
207 207
/*
 * Win32 directory listing via FindFirstFile/FindNextFile, excluding
 * "." and "..".  If `skip' names a subdirectory that is present, an
 * empty list is returned immediately.  Returns NULL with a Python
 * exception set on failure.  Cleanup uses the goto-chain below.
 */
static PyObject *_listdir(char *path, int plen, int wantstat, char *skip)
{
	PyObject *rval = NULL; /* initialize - return value */
	PyObject *list;
	HANDLE fh;
	WIN32_FIND_DATAA fd;
	char *pattern;

	/* build the path + \* pattern string */
	pattern = PyMem_Malloc(plen + 3); /* path + \* + \0 */
	if (!pattern) {
		PyErr_NoMemory();
		goto error_nomem;
	}
	memcpy(pattern, path, plen);

	if (plen > 0) {
		char c = path[plen-1];
		/* append a separator unless the path already ends in one
		   (or is a bare drive like "c:") */
		if (c != ':' && c != '/' && c != '\\')
			pattern[plen++] = '\\';
	}
	pattern[plen++] = '*';
	pattern[plen] = '\0';

	fh = FindFirstFileA(pattern, &fd);
	if (fh == INVALID_HANDLE_VALUE) {
		PyErr_SetFromWindowsErrWithFilename(GetLastError(), path);
		goto error_file;
	}

	list = PyList_New(0);
	if (!list)
		goto error_list;

	do {
		PyObject *item;

		if (fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
			if (!strcmp(fd.cFileName, ".")
			    || !strcmp(fd.cFileName, ".."))
				continue;

			/* quit early: caller only wanted to know whether
			   `skip' exists */
			if (skip && !strcmp(fd.cFileName, skip)) {
				rval = PyList_New(0);
				goto error;
			}
		}

		item = make_item(&fd, wantstat);
		if (!item)
			goto error;

		if (PyList_Append(list, item)) {
			Py_XDECREF(item);
			goto error;
		}

		Py_XDECREF(item);
	} while (FindNextFileA(fh, &fd));

	/* loop ends either at ERROR_NO_MORE_FILES (success) or a real error */
	if (GetLastError() != ERROR_NO_MORE_FILES) {
		PyErr_SetFromWindowsErrWithFilename(GetLastError(), path);
		goto error;
	}

	rval = list;
	Py_XINCREF(rval);
error:
	Py_XDECREF(list);
error_list:
	FindClose(fh);
error_file:
	PyMem_Free(pattern);
error_nomem:
	return rval;
}
284 284
285 285 #else
286 286
/*
 * Map a dirent's d_type (where the platform provides one) onto the
 * corresponding S_IFMT file-kind constant.  Returns -1 for unknown
 * types or when d_type is unsupported, telling the caller to fall
 * back to stat().
 */
int entkind(struct dirent *ent)
{
#ifdef DT_REG
	static const struct {
		int dt;
		int kind;
	} table[] = {
		{DT_REG, S_IFREG},   {DT_DIR, S_IFDIR},  {DT_LNK, S_IFLNK},
		{DT_BLK, S_IFBLK},   {DT_CHR, S_IFCHR},  {DT_FIFO, S_IFIFO},
		{DT_SOCK, S_IFSOCK},
	};
	size_t i;
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (ent->d_type == table[i].dt)
			return table[i].kind;
#endif
	return -1;
}
302 302
303 303 static PyObject *makestat(const struct stat *st)
304 304 {
305 305 PyObject *stat;
306 306
307 307 stat = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
308 308 if (stat)
309 309 memcpy(&((struct listdir_stat *)stat)->st, st, sizeof(*st));
310 310 return stat;
311 311 }
312 312
/*
 * Portable directory listing via readdir(), returning a Python list
 * of (name, kind) tuples — or (name, kind, stat) when keepstat is
 * true — excluding "." and "..".  If `skip' names a subdirectory
 * that is found, an empty list is returned instead.  Returns NULL
 * with a Python exception set on error.
 */
static PyObject *_listdir_stat(char *path, int pathlen, int keepstat,
			       char *skip)
{
	PyObject *list, *elem, *stat = NULL, *ret = NULL;
	char fullpath[PATH_MAX + 10];
	int kind, err;
	struct stat st;
	struct dirent *ent;
	DIR *dir;
#ifdef AT_SYMLINK_NOFOLLOW
	int dfd = -1;
#endif

	if (pathlen >= PATH_MAX) {
		errno = ENAMETOOLONG;
		PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
		goto error_value;
	}
	strncpy(fullpath, path, PATH_MAX);
	fullpath[pathlen] = '/';

	/* prefer fstatat() so entries are stat'ed relative to the
	   directory fd, avoiding a path rebuild per entry */
#ifdef AT_SYMLINK_NOFOLLOW
	dfd = open(path, O_RDONLY);
	if (dfd == -1) {
		PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
		goto error_value;
	}
	dir = fdopendir(dfd);
#else
	dir = opendir(path);
#endif
	if (!dir) {
		PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
		goto error_dir;
	}

	list = PyList_New(0);
	if (!list)
		goto error_list;

	while ((ent = readdir(dir))) {
		if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
			continue;

		kind = entkind(ent);
		/* stat when the caller asked for it, or when d_type gave
		   no answer */
		if (kind == -1 || keepstat) {
#ifdef AT_SYMLINK_NOFOLLOW
			err = fstatat(dfd, ent->d_name, &st,
				      AT_SYMLINK_NOFOLLOW);
#else
			strncpy(fullpath + pathlen + 1, ent->d_name,
				PATH_MAX - pathlen);
			fullpath[PATH_MAX] = '\0';
			err = lstat(fullpath, &st);
#endif
			if (err == -1) {
				/* race with file deletion? */
				if (errno == ENOENT)
					continue;
				/* rebuild the full path for the error message */
				strncpy(fullpath + pathlen + 1, ent->d_name,
					PATH_MAX - pathlen);
				fullpath[PATH_MAX] = 0;
				PyErr_SetFromErrnoWithFilename(PyExc_OSError,
							       fullpath);
				goto error;
			}
			kind = st.st_mode & S_IFMT;
		}

		/* quit early? */
		if (skip && kind == S_IFDIR && !strcmp(ent->d_name, skip)) {
			ret = PyList_New(0);
			goto error;
		}

		if (keepstat) {
			stat = makestat(&st);
			if (!stat)
				goto error;
			elem = Py_BuildValue("siN", ent->d_name, kind, stat);
		} else
			elem = Py_BuildValue("si", ent->d_name, kind);
		if (!elem)
			goto error;
		stat = NULL; /* "N" transferred ownership into elem */

		PyList_Append(list, elem);
		Py_DECREF(elem);
	}

	ret = list;
	Py_INCREF(ret);

error:
	Py_DECREF(list);
	Py_XDECREF(stat);
error_list:
	closedir(dir);
	/* closedir also closes its dirfd */
	goto error_value;
error_dir:
#ifdef AT_SYMLINK_NOFOLLOW
	close(dfd);
#endif
error_value:
	return ret;
}
420 420
421 421 #ifdef __APPLE__
422 422
/* layout of one getdirentriesattr() result record; the field order
   must match requested_attr as built in _listdir_batch, otherwise the
   data is misinterpreted */
typedef struct {
	u_int32_t length;
	attrreference_t name;
	fsobj_type_t obj_type;
	struct timespec mtime;
#if __LITTLE_ENDIAN__
	mode_t access_mask;
	uint16_t padding;
#else
	uint16_t padding;
	mode_t access_mask;
#endif
	off_t size;
} __attribute__((packed)) attrbuf_entry;
437 437
438 438 int attrkind(attrbuf_entry *entry)
439 439 {
440 440 switch (entry->obj_type) {
441 441 case VREG: return S_IFREG;
442 442 case VDIR: return S_IFDIR;
443 443 case VLNK: return S_IFLNK;
444 444 case VBLK: return S_IFBLK;
445 445 case VCHR: return S_IFCHR;
446 446 case VFIFO: return S_IFIFO;
447 447 case VSOCK: return S_IFSOCK;
448 448 }
449 449 return -1;
450 450 }
451 451
452 452 /* get these many entries at a time */
453 453 #define LISTDIR_BATCH_SIZE 50
454 454
/*
 * OS X fast-path directory listing via getdirentriesattr(), which
 * fetches name, type, mtime, mode and size for a whole batch of
 * entries in one call.  On filesystems that do not support it, or
 * when the directory changes mid-iteration, *fallback is set to true
 * and NULL is returned so the caller retries with _listdir_stat().
 */
static PyObject *_listdir_batch(char *path, int pathlen, int keepstat,
				char *skip, bool *fallback)
{
	PyObject *list, *elem, *stat = NULL, *ret = NULL;
	int kind, err;
	unsigned long index;
	unsigned int count, old_state, new_state;
	bool state_seen = false;
	attrbuf_entry *entry;
	/* from the getattrlist(2) man page: a path can be no longer than
	   (NAME_MAX * 3 + 1) bytes. Also, "The getattrlist() function will
	   silently truncate attribute data if attrBufSize is too small." So
	   pass in a buffer big enough for the worst case. */
	char attrbuf[LISTDIR_BATCH_SIZE * (sizeof(attrbuf_entry) + NAME_MAX * 3 + 1)];
	unsigned int basep_unused;

	struct stat st;
	int dfd = -1;

	/* these must match the attrbuf_entry struct, otherwise you'll end up
	   with garbage */
	struct attrlist requested_attr = {0};
	requested_attr.bitmapcount = ATTR_BIT_MAP_COUNT;
	requested_attr.commonattr = (ATTR_CMN_NAME | ATTR_CMN_OBJTYPE |
				     ATTR_CMN_MODTIME | ATTR_CMN_ACCESSMASK);
	requested_attr.fileattr = ATTR_FILE_DATALENGTH;

	*fallback = false;

	if (pathlen >= PATH_MAX) {
		errno = ENAMETOOLONG;
		PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
		goto error_value;
	}

	dfd = open(path, O_RDONLY);
	if (dfd == -1) {
		PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
		goto error_value;
	}

	list = PyList_New(0);
	if (!list)
		goto error_dir;

	do {
		count = LISTDIR_BATCH_SIZE;
		err = getdirentriesattr(dfd, &requested_attr, &attrbuf,
					sizeof(attrbuf), &count, &basep_unused,
					&new_state, 0);
		if (err < 0) {
			if (errno == ENOTSUP) {
				/* We're on a filesystem that doesn't support
				   getdirentriesattr. Fall back to the
				   stat-based implementation. */
				*fallback = true;
			} else
				PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
			goto error;
		}

		if (!state_seen) {
			old_state = new_state;
			state_seen = true;
		} else if (old_state != new_state) {
			/* There's an edge case with getdirentriesattr. Consider
			   the following initial list of files:

			   a
			   b
			   <--
			   c
			   d

			   If the iteration is paused at the arrow, and b is
			   deleted before it is resumed, getdirentriesattr will
			   not return d at all!  Ordinarily we're expected to
			   restart the iteration from the beginning. To avoid
			   getting stuck in a retry loop here, fall back to
			   stat. */
			*fallback = true;
			goto error;
		}

		entry = (attrbuf_entry *)attrbuf;

		for (index = 0; index < count; index++) {
			/* entry->name is an offset/length pair into the buffer */
			char *filename = ((char *)&entry->name) +
				entry->name.attr_dataoffset;

			if (!strcmp(filename, ".") || !strcmp(filename, ".."))
				continue;

			kind = attrkind(entry);
			if (kind == -1) {
				PyErr_Format(PyExc_OSError,
					     "unknown object type %u for file "
					     "%s%s!",
					     entry->obj_type, path, filename);
				goto error;
			}

			/* quit early? */
			if (skip && kind == S_IFDIR && !strcmp(filename, skip)) {
				ret = PyList_New(0);
				goto error;
			}

			if (keepstat) {
				/* from the getattrlist(2) man page: "Only the
				   permission bits ... are valid". */
				st.st_mode = (entry->access_mask & ~S_IFMT) | kind;
				st.st_mtime = entry->mtime.tv_sec;
				st.st_size = entry->size;
				stat = makestat(&st);
				if (!stat)
					goto error;
				elem = Py_BuildValue("siN", filename, kind, stat);
			} else
				elem = Py_BuildValue("si", filename, kind);
			if (!elem)
				goto error;
			stat = NULL; /* ownership passed into elem via "N" */

			PyList_Append(list, elem);
			Py_DECREF(elem);

			/* records are variable-length; advance by the
			   recorded length */
			entry = (attrbuf_entry *)((char *)entry + entry->length);
		}
	} while (err == 0);

	ret = list;
	Py_INCREF(ret);

error:
	Py_DECREF(list);
	Py_XDECREF(stat);
error_dir:
	close(dfd);
error_value:
	return ret;
}
597 597
598 598 #endif /* __APPLE__ */
599 599
600 600 static PyObject *_listdir(char *path, int pathlen, int keepstat, char *skip)
601 601 {
602 602 #ifdef __APPLE__
603 603 PyObject *ret;
604 604 bool fallback = false;
605 605
606 606 ret = _listdir_batch(path, pathlen, keepstat, skip, &fallback);
607 607 if (ret != NULL || !fallback)
608 608 return ret;
609 609 #endif
610 610 return _listdir_stat(path, pathlen, keepstat, skip);
611 611 }
612 612
613 613 static PyObject *statfiles(PyObject *self, PyObject *args)
614 614 {
615 615 PyObject *names, *stats;
616 616 Py_ssize_t i, count;
617 617
618 618 if (!PyArg_ParseTuple(args, "O:statfiles", &names))
619 619 return NULL;
620 620
621 621 count = PySequence_Length(names);
622 622 if (count == -1) {
623 623 PyErr_SetString(PyExc_TypeError, "not a sequence");
624 624 return NULL;
625 625 }
626 626
627 627 stats = PyList_New(count);
628 628 if (stats == NULL)
629 629 return NULL;
630 630
631 631 for (i = 0; i < count; i++) {
632 632 PyObject *stat, *pypath;
633 633 struct stat st;
634 634 int ret, kind;
635 635 char *path;
636 636
637 637 /* With a large file count or on a slow filesystem,
638 638 don't block signals for long (issue4878). */
639 639 if ((i % 1000) == 999 && PyErr_CheckSignals() == -1)
640 640 goto bail;
641 641
642 642 pypath = PySequence_GetItem(names, i);
643 643 if (!pypath)
644 644 goto bail;
645 645 path = PyBytes_AsString(pypath);
646 646 if (path == NULL) {
647 647 Py_DECREF(pypath);
648 648 PyErr_SetString(PyExc_TypeError, "not a string");
649 649 goto bail;
650 650 }
651 651 ret = lstat(path, &st);
652 652 Py_DECREF(pypath);
653 653 kind = st.st_mode & S_IFMT;
654 654 if (ret != -1 && (kind == S_IFREG || kind == S_IFLNK)) {
655 655 stat = makestat(&st);
656 656 if (stat == NULL)
657 657 goto bail;
658 658 PyList_SET_ITEM(stats, i, stat);
659 659 } else {
660 660 Py_INCREF(Py_None);
661 661 PyList_SET_ITEM(stats, i, Py_None);
662 662 }
663 663 }
664 664
665 665 return stats;
666 666
667 667 bail:
668 668 Py_DECREF(stats);
669 669 return NULL;
670 670 }
671 671
672 672 /*
673 673 * recvfds() simply does not release GIL during blocking io operation because
674 674 * command server is known to be single-threaded.
675 675 *
676 676 * Old systems such as Solaris don't provide CMSG_LEN, msg_control, etc.
677 677 * Currently, recvfds() is not supported on these platforms.
678 678 */
679 679 #ifdef CMSG_LEN
680 680
/*
 * Receive file descriptors passed over a unix-domain socket via
 * SCM_RIGHTS ancillary data.  On success, *rfds points into cbuf and
 * the number of descriptors is returned (0 when none were attached);
 * on recvmsg() failure -1 is returned with errno set.  cbuf must stay
 * alive for as long as *rfds is dereferenced.
 */
static ssize_t recvfdstobuf(int sockfd, int **rfds, void *cbuf, size_t cbufsize)
{
	char dummy[1];
	struct iovec iov = {dummy, sizeof(dummy)};
	struct msghdr msgh = {0};
	struct cmsghdr *cmsg;

	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = cbuf;
	msgh.msg_controllen = (socklen_t)cbufsize;
	if (recvmsg(sockfd, &msgh, 0) < 0)
		return -1;

	/* walk the ancillary data looking for the SCM_RIGHTS payload */
	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg;
	     cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
		if (cmsg->cmsg_level != SOL_SOCKET ||
		    cmsg->cmsg_type != SCM_RIGHTS)
			continue;
		*rfds = (int *)CMSG_DATA(cmsg);
		return (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
	}

	/* no descriptors arrived with this message */
	*rfds = cbuf;
	return 0;
}
707 707
/*
 * recvfds(sockfd) -> list of ints: receive file descriptors over a
 * unix-domain socket.  Raises OSError when recvmsg() fails.
 * NOTE(review): if list construction fails midway, descriptors
 * already received are not closed here — presumably tolerable for the
 * single-threaded command server; confirm with callers.
 */
static PyObject *recvfds(PyObject *self, PyObject *args)
{
	int sockfd;
	int *rfds = NULL;
	ssize_t rfdscount, i;
	char cbuf[256];
	PyObject *rfdslist = NULL;

	if (!PyArg_ParseTuple(args, "i", &sockfd))
		return NULL;

	rfdscount = recvfdstobuf(sockfd, &rfds, cbuf, sizeof(cbuf));
	if (rfdscount < 0)
		return PyErr_SetFromErrno(PyExc_OSError);

	rfdslist = PyList_New(rfdscount);
	if (!rfdslist)
		goto bail;
	for (i = 0; i < rfdscount; i++) {
		PyObject *obj = PyLong_FromLong(rfds[i]);
		if (!obj)
			goto bail;
		PyList_SET_ITEM(rfdslist, i, obj);
	}
	return rfdslist;

bail:
	Py_XDECREF(rfdslist);
	return NULL;
}
738 738
739 739 #endif /* CMSG_LEN */
740 740
/* pick a setprocname() strategy for this platform */
#if defined(HAVE_SETPROCTITLE)
/* setproctitle is the first choice - available in FreeBSD */
#define SETPROCNAME_USE_SETPROCTITLE
#elif (defined(__linux__) || defined(__APPLE__)) && PY_MAJOR_VERSION == 2
/* rewrite the argv buffer in place - works in Linux and OS X. Py_GetArgcArgv
 * in Python 3 returns the copied wchar_t **argv, thus unsupported. */
#define SETPROCNAME_USE_ARGVREWRITE
#else
#define SETPROCNAME_USE_NONE
#endif
751 751
752 752 #ifndef SETPROCNAME_USE_NONE
/*
 * setprocname(name): change the process title as shown by ps(1),
 * either via setproctitle() or by overwriting the original argv[]
 * buffer in place.  Always returns None.
 */
static PyObject *setprocname(PyObject *self, PyObject *args)
{
	const char *name = NULL;
	if (!PyArg_ParseTuple(args, "s", &name))
		return NULL;

#if defined(SETPROCNAME_USE_SETPROCTITLE)
	setproctitle("%s", name);
#elif defined(SETPROCNAME_USE_ARGVREWRITE)
	{
		/* measured once; the argv region does not move */
		static char *argvstart = NULL;
		static size_t argvsize = 0;
		if (argvstart == NULL) {
			int argc = 0, i;
			char **argv = NULL;
			char *argvend;
			extern void Py_GetArgcArgv(int *argc, char ***argv);
			Py_GetArgcArgv(&argc, &argv);

			/* Check the memory we can use. Typically, argv[i] and
			 * argv[i + 1] are continuous. */
			argvend = argvstart = argv[0];
			for (i = 0; i < argc; ++i) {
				if (argv[i] > argvend || argv[i] < argvstart)
					break; /* not continuous */
				size_t len = strlen(argv[i]);
				argvend = argv[i] + len + 1 /* '\0' */;
			}
			if (argvend > argvstart) /* sanity check */
				argvsize = argvend - argvstart;
		}

		if (argvstart && argvsize > 1) {
			int n = snprintf(argvstart, argvsize, "%s", name);
			/* zero the tail so stale argument text does not
			   show up after the new title */
			if (n >= 0 && (size_t)n < argvsize)
				memset(argvstart + n, 0, argvsize - n);
		}
	}
#endif

	Py_RETURN_NONE;
}
795 795 #endif /* ndef SETPROCNAME_USE_NONE */
796 796
#if defined(HAVE_BSD_STATFS)
/* return a static filesystem type name for a statfs result, or NULL */
static const char *describefstype(const struct statfs *pbuf)
{
	/* BSD or OSX provides a f_fstypename field */
	return pbuf->f_fstypename;
}
#elif defined(HAVE_LINUX_STATFS)
/* return a static filesystem type name for a statfs result, or NULL;
   Linux only exposes a numeric f_type, so map the magic numbers that
   this kernel's headers happen to define */
static const char *describefstype(const struct statfs *pbuf)
{
	/* Begin of Linux filesystems */
#ifdef ADFS_SUPER_MAGIC
	if (pbuf->f_type == ADFS_SUPER_MAGIC)
		return "adfs";
#endif
#ifdef AFFS_SUPER_MAGIC
	if (pbuf->f_type == AFFS_SUPER_MAGIC)
		return "affs";
#endif
#ifdef AUTOFS_SUPER_MAGIC
	if (pbuf->f_type == AUTOFS_SUPER_MAGIC)
		return "autofs";
#endif
#ifdef BDEVFS_MAGIC
	if (pbuf->f_type == BDEVFS_MAGIC)
		return "bdevfs";
#endif
#ifdef BEFS_SUPER_MAGIC
	if (pbuf->f_type == BEFS_SUPER_MAGIC)
		return "befs";
#endif
#ifdef BFS_MAGIC
	if (pbuf->f_type == BFS_MAGIC)
		return "bfs";
#endif
#ifdef BINFMTFS_MAGIC
	if (pbuf->f_type == BINFMTFS_MAGIC)
		return "binfmtfs";
#endif
#ifdef BTRFS_SUPER_MAGIC
	if (pbuf->f_type == BTRFS_SUPER_MAGIC)
		return "btrfs";
#endif
#ifdef CGROUP_SUPER_MAGIC
	if (pbuf->f_type == CGROUP_SUPER_MAGIC)
		return "cgroup";
#endif
#ifdef CIFS_MAGIC_NUMBER
	if (pbuf->f_type == CIFS_MAGIC_NUMBER)
		return "cifs";
#endif
#ifdef CODA_SUPER_MAGIC
	if (pbuf->f_type == CODA_SUPER_MAGIC)
		return "coda";
#endif
#ifdef COH_SUPER_MAGIC
	if (pbuf->f_type == COH_SUPER_MAGIC)
		return "coh";
#endif
#ifdef CRAMFS_MAGIC
	if (pbuf->f_type == CRAMFS_MAGIC)
		return "cramfs";
#endif
#ifdef DEBUGFS_MAGIC
	if (pbuf->f_type == DEBUGFS_MAGIC)
		return "debugfs";
#endif
#ifdef DEVFS_SUPER_MAGIC
	if (pbuf->f_type == DEVFS_SUPER_MAGIC)
		return "devfs";
#endif
#ifdef DEVPTS_SUPER_MAGIC
	if (pbuf->f_type == DEVPTS_SUPER_MAGIC)
		return "devpts";
#endif
#ifdef EFIVARFS_MAGIC
	if (pbuf->f_type == EFIVARFS_MAGIC)
		return "efivarfs";
#endif
#ifdef EFS_SUPER_MAGIC
	if (pbuf->f_type == EFS_SUPER_MAGIC)
		return "efs";
#endif
#ifdef EXT_SUPER_MAGIC
	if (pbuf->f_type == EXT_SUPER_MAGIC)
		return "ext";
#endif
#ifdef EXT2_OLD_SUPER_MAGIC
	if (pbuf->f_type == EXT2_OLD_SUPER_MAGIC)
		return "ext2";
#endif
#ifdef EXT2_SUPER_MAGIC
	if (pbuf->f_type == EXT2_SUPER_MAGIC)
		return "ext2";
#endif
#ifdef EXT3_SUPER_MAGIC
	if (pbuf->f_type == EXT3_SUPER_MAGIC)
		return "ext3";
#endif
#ifdef EXT4_SUPER_MAGIC
	if (pbuf->f_type == EXT4_SUPER_MAGIC)
		return "ext4";
#endif
#ifdef F2FS_SUPER_MAGIC
	if (pbuf->f_type == F2FS_SUPER_MAGIC)
		return "f2fs";
#endif
#ifdef FUSE_SUPER_MAGIC
	if (pbuf->f_type == FUSE_SUPER_MAGIC)
		return "fuse";
#endif
#ifdef FUTEXFS_SUPER_MAGIC
	if (pbuf->f_type == FUTEXFS_SUPER_MAGIC)
		return "futexfs";
#endif
#ifdef HFS_SUPER_MAGIC
	if (pbuf->f_type == HFS_SUPER_MAGIC)
		return "hfs";
#endif
#ifdef HOSTFS_SUPER_MAGIC
	if (pbuf->f_type == HOSTFS_SUPER_MAGIC)
		return "hostfs";
#endif
#ifdef HPFS_SUPER_MAGIC
	if (pbuf->f_type == HPFS_SUPER_MAGIC)
		return "hpfs";
#endif
#ifdef HUGETLBFS_MAGIC
	if (pbuf->f_type == HUGETLBFS_MAGIC)
		return "hugetlbfs";
#endif
#ifdef ISOFS_SUPER_MAGIC
	if (pbuf->f_type == ISOFS_SUPER_MAGIC)
		return "isofs";
#endif
#ifdef JFFS2_SUPER_MAGIC
	if (pbuf->f_type == JFFS2_SUPER_MAGIC)
		return "jffs2";
#endif
#ifdef JFS_SUPER_MAGIC
	if (pbuf->f_type == JFS_SUPER_MAGIC)
		return "jfs";
#endif
#ifdef MINIX_SUPER_MAGIC
	if (pbuf->f_type == MINIX_SUPER_MAGIC)
		return "minix";
#endif
#ifdef MINIX2_SUPER_MAGIC
	if (pbuf->f_type == MINIX2_SUPER_MAGIC)
		return "minix2";
#endif
#ifdef MINIX3_SUPER_MAGIC
	if (pbuf->f_type == MINIX3_SUPER_MAGIC)
		return "minix3";
#endif
#ifdef MQUEUE_MAGIC
	if (pbuf->f_type == MQUEUE_MAGIC)
		return "mqueue";
#endif
#ifdef MSDOS_SUPER_MAGIC
	if (pbuf->f_type == MSDOS_SUPER_MAGIC)
		return "msdos";
#endif
#ifdef NCP_SUPER_MAGIC
	if (pbuf->f_type == NCP_SUPER_MAGIC)
		return "ncp";
#endif
#ifdef NFS_SUPER_MAGIC
	if (pbuf->f_type == NFS_SUPER_MAGIC)
		return "nfs";
#endif
#ifdef NILFS_SUPER_MAGIC
	if (pbuf->f_type == NILFS_SUPER_MAGIC)
		return "nilfs";
#endif
#ifdef NTFS_SB_MAGIC
	if (pbuf->f_type == NTFS_SB_MAGIC)
		return "ntfs-sb";
#endif
#ifdef OCFS2_SUPER_MAGIC
	if (pbuf->f_type == OCFS2_SUPER_MAGIC)
		return "ocfs2";
#endif
#ifdef OPENPROM_SUPER_MAGIC
	if (pbuf->f_type == OPENPROM_SUPER_MAGIC)
		return "openprom";
#endif
#ifdef OVERLAYFS_SUPER_MAGIC
	if (pbuf->f_type == OVERLAYFS_SUPER_MAGIC)
		return "overlay";
#endif
#ifdef PIPEFS_MAGIC
	if (pbuf->f_type == PIPEFS_MAGIC)
		return "pipefs";
#endif
#ifdef PROC_SUPER_MAGIC
	if (pbuf->f_type == PROC_SUPER_MAGIC)
		return "proc";
#endif
#ifdef PSTOREFS_MAGIC
	if (pbuf->f_type == PSTOREFS_MAGIC)
		return "pstorefs";
#endif
#ifdef QNX4_SUPER_MAGIC
	if (pbuf->f_type == QNX4_SUPER_MAGIC)
		return "qnx4";
#endif
#ifdef QNX6_SUPER_MAGIC
	if (pbuf->f_type == QNX6_SUPER_MAGIC)
		return "qnx6";
#endif
#ifdef RAMFS_MAGIC
	if (pbuf->f_type == RAMFS_MAGIC)
		return "ramfs";
#endif
#ifdef REISERFS_SUPER_MAGIC
	if (pbuf->f_type == REISERFS_SUPER_MAGIC)
		return "reiserfs";
#endif
#ifdef ROMFS_MAGIC
	if (pbuf->f_type == ROMFS_MAGIC)
		return "romfs";
#endif
#ifdef SECURITYFS_MAGIC
	if (pbuf->f_type == SECURITYFS_MAGIC)
		return "securityfs";
#endif
#ifdef SELINUX_MAGIC
	if (pbuf->f_type == SELINUX_MAGIC)
		return "selinux";
#endif
#ifdef SMACK_MAGIC
	if (pbuf->f_type == SMACK_MAGIC)
		return "smack";
#endif
#ifdef SMB_SUPER_MAGIC
	if (pbuf->f_type == SMB_SUPER_MAGIC)
		return "smb";
#endif
#ifdef SOCKFS_MAGIC
	if (pbuf->f_type == SOCKFS_MAGIC)
		return "sockfs";
#endif
#ifdef SQUASHFS_MAGIC
	if (pbuf->f_type == SQUASHFS_MAGIC)
		return "squashfs";
#endif
#ifdef SYSFS_MAGIC
	if (pbuf->f_type == SYSFS_MAGIC)
		return "sysfs";
#endif
#ifdef SYSV2_SUPER_MAGIC
	if (pbuf->f_type == SYSV2_SUPER_MAGIC)
		return "sysv2";
#endif
#ifdef SYSV4_SUPER_MAGIC
	if (pbuf->f_type == SYSV4_SUPER_MAGIC)
		return "sysv4";
#endif
#ifdef TMPFS_MAGIC
	if (pbuf->f_type == TMPFS_MAGIC)
		return "tmpfs";
#endif
#ifdef UDF_SUPER_MAGIC
	if (pbuf->f_type == UDF_SUPER_MAGIC)
		return "udf";
#endif
#ifdef UFS_MAGIC
	if (pbuf->f_type == UFS_MAGIC)
		return "ufs";
#endif
#ifdef USBDEVICE_SUPER_MAGIC
	if (pbuf->f_type == USBDEVICE_SUPER_MAGIC)
		return "usbdevice";
#endif
#ifdef V9FS_MAGIC
	if (pbuf->f_type == V9FS_MAGIC)
		return "v9fs";
#endif
#ifdef VXFS_SUPER_MAGIC
	if (pbuf->f_type == VXFS_SUPER_MAGIC)
		return "vxfs";
#endif
#ifdef XENFS_SUPER_MAGIC
	if (pbuf->f_type == XENFS_SUPER_MAGIC)
		return "xenfs";
#endif
#ifdef XENIX_SUPER_MAGIC
	if (pbuf->f_type == XENIX_SUPER_MAGIC)
		return "xenix";
#endif
#ifdef XFS_SUPER_MAGIC
	if (pbuf->f_type == XFS_SUPER_MAGIC)
		return "xfs";
#endif
	/* End of Linux filesystems */
	return NULL;
}
#endif /* def HAVE_LINUX_STATFS */
1095 1095
#if defined(HAVE_BSD_STATFS) || defined(HAVE_LINUX_STATFS)
/* given a directory path, return filesystem type name (best-effort) */
static PyObject *getfstype(PyObject *self, PyObject *args)
{
	const char *path = NULL;
	struct statfs buf;
	int r;
	if (!PyArg_ParseTuple(args, "s", &path))
		return NULL;

	memset(&buf, 0, sizeof(buf));
	r = statfs(path, &buf);
	if (r != 0)
		/* propagate the failure as OSError (with errno) instead of
		   silently returning None, so callers can tell "unknown
		   filesystem" apart from "statfs failed" */
		return PyErr_SetFromErrno(PyExc_OSError);
	return Py_BuildValue("s", describefstype(&buf));
}
#endif /* defined(HAVE_LINUX_STATFS) || defined(HAVE_BSD_STATFS) */
1113 1113
1114 1114 #endif /* ndef _WIN32 */
1115 1115
/*
 * Python entry point: listdir(path, stat=False, skip=None).
 *
 * Parses the arguments and forwards to _listdir(), which does the actual
 * platform-specific directory walk.  'stat' requests per-entry stat data;
 * 'skip' (bytes or None) is passed through to _listdir — see its
 * definition for the exact semantics.
 */
static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs)
{
	PyObject *statobj = NULL; /* initialize - optional arg */
	PyObject *skipobj = NULL; /* initialize - optional arg */
	char *path, *skip = NULL;
	int wantstat, plen;

	static char *kwlist[] = {"path", "stat", "skip", NULL};

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|OO:listdir",
					 kwlist, &path, &plen, &statobj, &skipobj))
		return NULL;

	/* any truthy object enables stat collection */
	wantstat = statobj && PyObject_IsTrue(statobj);

	/* skip may be omitted or None; otherwise it must be bytes */
	if (skipobj && skipobj != Py_None) {
		skip = PyBytes_AsString(skipobj);
		if (!skip)
			return NULL;
	}

	return _listdir(path, plen, wantstat, skip);
}
1139 1139
#ifdef _WIN32
/*
 * posixfile(name, mode='rb', buffering=-1): open a file on Windows with
 * POSIX-like sharing semantics.
 *
 * The stock CRT open() does not pass FILE_SHARE_DELETE, which prevents
 * renaming or deleting files that are open.  This opens the handle via
 * CreateFile with all FILE_SHARE_* flags, converts it to a CRT fd and
 * finally to a Python file object.
 *
 * Raises ValueError for a bad mode string; WindowsError or IOError for
 * OS-level failures (as advertised in the method table docstring).
 */
static PyObject *posixfile(PyObject *self, PyObject *args, PyObject *kwds)
{
	static char *kwlist[] = {"name", "mode", "buffering", NULL};
	PyObject *file_obj = NULL;
	char *name = NULL;
	char *mode = "rb";
	DWORD access = 0;
	DWORD creation;
	HANDLE handle;
	int fd, flags = 0;
	int bufsize = -1;
	char m0, m1, m2;
	char fpmode[4];
	int fppos = 0;
	int plus;
	FILE *fp;

	if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|si:posixfile", kwlist,
					 Py_FileSystemDefaultEncoding,
					 &name, &mode, &bufsize))
		return NULL;

	/* examine at most the first three mode characters (e.g. "rb+") */
	m0 = mode[0];
	m1 = m0 ? mode[1] : '\0';
	m2 = m1 ? mode[2] : '\0';
	plus = m1 == '+' || m2 == '+';

	/* build the fdopen() mode string in parallel with the CRT flags */
	fpmode[fppos++] = m0;
	if (m1 == 'b' || m2 == 'b') {
		flags = _O_BINARY;
		fpmode[fppos++] = 'b';
	}
	else
		flags = _O_TEXT;
	if (m0 == 'r' && !plus) {
		flags |= _O_RDONLY;
		access = GENERIC_READ;
	} else {
		/*
		work around http://support.microsoft.com/kb/899149 and
		set _O_RDWR for 'w' and 'a', even if mode has no '+'
		*/
		flags |= _O_RDWR;
		access = GENERIC_READ | GENERIC_WRITE;
		fpmode[fppos++] = '+';
	}
	fpmode[fppos++] = '\0';

	switch (m0) {
	case 'r':
		creation = OPEN_EXISTING;
		break;
	case 'w':
		creation = CREATE_ALWAYS;
		break;
	case 'a':
		creation = OPEN_ALWAYS;
		flags |= _O_APPEND;
		break;
	default:
		PyErr_Format(PyExc_ValueError,
			     "mode string must begin with one of 'r', 'w', "
			     "or 'a', not '%c'", m0);
		goto bail;
	}

	/* full sharing, including delete — the whole point of this wrapper */
	handle = CreateFile(name, access,
			    FILE_SHARE_READ | FILE_SHARE_WRITE |
			    FILE_SHARE_DELETE,
			    NULL,
			    creation,
			    FILE_ATTRIBUTE_NORMAL,
			    0);

	if (handle == INVALID_HANDLE_VALUE) {
		PyErr_SetFromWindowsErrWithFilename(GetLastError(), name);
		goto bail;
	}

	fd = _open_osfhandle((intptr_t)handle, flags);

	if (fd == -1) {
		CloseHandle(handle);
		PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
		goto bail;
	}
#ifndef IS_PY3K
	fp = _fdopen(fd, fpmode);
	if (fp == NULL) {
		_close(fd);
		PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
		goto bail;
	}

	/* fclose as the close hook: it releases fd and handle as well */
	file_obj = PyFile_FromFile(fp, name, mode, fclose);
	if (file_obj == NULL) {
		fclose(fp);
		goto bail;
	}

	PyFile_SetBufSize(file_obj, bufsize);
#else
	/* py3: last arg 1 = closefd, the file object owns the descriptor */
	file_obj = PyFile_FromFd(fd, name, mode, bufsize, NULL, NULL, NULL, 1);
	if (file_obj == NULL)
		goto bail;
#endif
bail:
	PyMem_Free(name);
	return file_obj;
}
#endif
1252 1252
#ifdef __APPLE__
#include <ApplicationServices/ApplicationServices.h>

/*
 * Return True when a CoreGraphics session dictionary is available
 * (i.e. the process runs inside a GUI login session), False otherwise.
 */
static PyObject *isgui(PyObject *self)
{
	CFDictionaryRef dict = CGSessionCopyCurrentDictionary();

	if (dict != NULL) {
		/* we only care about existence, not contents */
		CFRelease(dict);
		Py_RETURN_TRUE;
	} else {
		Py_RETURN_FALSE;
	}
}
#endif
1268 1268
static char osutil_doc[] = "Native operating system services.";

/* module method table; available entries depend on the platform */
static PyMethodDef methods[] = {
	{"listdir", (PyCFunction)listdir, METH_VARARGS | METH_KEYWORDS,
	 "list a directory\n"},
#ifdef _WIN32
	{"posixfile", (PyCFunction)posixfile, METH_VARARGS | METH_KEYWORDS,
	 "Open a file with POSIX-like semantics.\n"
"On error, this function may raise either a WindowsError or an IOError."},
#else
	{"statfiles", (PyCFunction)statfiles, METH_VARARGS | METH_KEYWORDS,
	 "stat a series of files or symlinks\n"
"Returns None for non-existent entries and entries of other types.\n"},
#ifdef CMSG_LEN
	{"recvfds", (PyCFunction)recvfds, METH_VARARGS,
	 "receive list of file descriptors via socket\n"},
#endif
#ifndef SETPROCNAME_USE_NONE
	{"setprocname", (PyCFunction)setprocname, METH_VARARGS,
	 "set process title (best-effort)\n"},
#endif
#if defined(HAVE_BSD_STATFS) || defined(HAVE_LINUX_STATFS)
	{"getfstype", (PyCFunction)getfstype, METH_VARARGS,
	 "get filesystem type (best-effort)\n"},
#endif
#endif /* ndef _WIN32 */
#ifdef __APPLE__
	{
		"isgui", (PyCFunction)isgui, METH_NOARGS,
		"Is a CoreGraphics session available?"
	},
#endif
	{NULL, NULL}
};

#ifdef IS_PY3K
/* Python 3 module initialization */
static struct PyModuleDef osutil_module = {
	PyModuleDef_HEAD_INIT,
	"osutil",
	osutil_doc,
	-1,
	methods
};

PyMODINIT_FUNC PyInit_osutil(void)
{
	if (PyType_Ready(&listdir_stat_type) < 0)
		return NULL;

	return PyModule_Create(&osutil_module);
}
#else
/* Python 2 module initialization */
PyMODINIT_FUNC initosutil(void)
{
	if (PyType_Ready(&listdir_stat_type) == -1)
		return;

	Py_InitModule3("osutil", methods, osutil_doc);
}
#endif
@@ -1,3630 +1,3633
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import codecs
21 21 import collections
22 22 import datetime
23 23 import errno
24 24 import gc
25 25 import hashlib
26 26 import imp
27 27 import os
28 28 import platform as pyplatform
29 29 import re as remod
30 30 import shutil
31 31 import signal
32 32 import socket
33 33 import stat
34 34 import string
35 35 import subprocess
36 36 import sys
37 37 import tempfile
38 38 import textwrap
39 39 import time
40 40 import traceback
41 41 import zlib
42 42
43 43 from . import (
44 44 encoding,
45 45 error,
46 46 i18n,
47 47 osutil,
48 48 parsers,
49 49 pycompat,
50 50 )
51 51
52 52 empty = pycompat.empty
53 53 httplib = pycompat.httplib
54 54 httpserver = pycompat.httpserver
55 55 pickle = pycompat.pickle
56 56 queue = pycompat.queue
57 57 socketserver = pycompat.socketserver
58 58 stderr = pycompat.stderr
59 59 stdin = pycompat.stdin
60 60 stdout = pycompat.stdout
61 61 stringio = pycompat.stringio
62 62 urlerr = pycompat.urlerr
63 63 urlreq = pycompat.urlreq
64 64 xmlrpclib = pycompat.xmlrpclib
65 65
def isatty(fp):
    """Return whether fp is attached to a terminal.

    Objects without an isatty() method are reported as non-terminals
    instead of raising."""
    try:
        interactive = fp.isatty()
    except AttributeError:
        return False
    return interactive
71 71
# glibc determines buffering on first write to stdout - if we replace a TTY
# destined stdout with a pipe destined stdout (e.g. pager), we want line
# buffering
if isatty(stdout):
    stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)

# pick the platform implementation module; the aliases below re-export
# its functions under stable names
if pycompat.osname == 'nt':
    from . import windows as platform
    # NOTE(review): winstdout presumably adapts stdout for Windows console
    # quirks — confirm against windows.py
    stdout = platform.winstdout(stdout)
else:
    from . import posix as platform
83 83
84 84 _ = i18n._
85 85
86 86 bindunixsocket = platform.bindunixsocket
87 87 cachestat = platform.cachestat
88 88 checkexec = platform.checkexec
89 89 checklink = platform.checklink
90 90 copymode = platform.copymode
91 91 executablepath = platform.executablepath
92 92 expandglobs = platform.expandglobs
93 93 explainexit = platform.explainexit
94 94 findexe = platform.findexe
95 95 gethgcmd = platform.gethgcmd
96 96 getuser = platform.getuser
97 97 getpid = os.getpid
98 98 groupmembers = platform.groupmembers
99 99 groupname = platform.groupname
100 100 hidewindow = platform.hidewindow
101 101 isexec = platform.isexec
102 102 isowner = platform.isowner
103 103 localpath = platform.localpath
104 104 lookupreg = platform.lookupreg
105 105 makedir = platform.makedir
106 106 nlinks = platform.nlinks
107 107 normpath = platform.normpath
108 108 normcase = platform.normcase
109 109 normcasespec = platform.normcasespec
110 110 normcasefallback = platform.normcasefallback
111 111 openhardlinks = platform.openhardlinks
112 112 oslink = platform.oslink
113 113 parsepatchoutput = platform.parsepatchoutput
114 114 pconvert = platform.pconvert
115 115 poll = platform.poll
116 116 popen = platform.popen
117 117 posixfile = platform.posixfile
118 118 quotecommand = platform.quotecommand
119 119 readpipe = platform.readpipe
120 120 rename = platform.rename
121 121 removedirs = platform.removedirs
122 122 samedevice = platform.samedevice
123 123 samefile = platform.samefile
124 124 samestat = platform.samestat
125 125 setbinary = platform.setbinary
126 126 setflags = platform.setflags
127 127 setsignalhandler = platform.setsignalhandler
128 128 shellquote = platform.shellquote
129 129 spawndetached = platform.spawndetached
130 130 split = platform.split
131 131 sshargs = platform.sshargs
132 132 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
133 133 statisexec = platform.statisexec
134 134 statislink = platform.statislink
135 135 testpid = platform.testpid
136 136 umask = platform.umask
137 137 unlink = platform.unlink
138 138 username = platform.username
139 139
# Python compatibility

# unique sentinel: lets callers distinguish "attribute absent" from None
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
148 148
def safehasattr(thing, attr):
    # hasattr() replacement: only a missing attribute reads as False;
    # exceptions other than AttributeError raised by the lookup propagate
    # (py2 hasattr would swallow them)
    return getattr(thing, attr, _notset) is not _notset
151 151
def bitsfrom(container):
    """OR together all the integers in container; 0 for an empty one."""
    mask = 0
    for flag in container:
        mask |= flag
    return mask
157 157
# digest name -> hashlib constructor
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every preference entry must have a constructor
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
168 168
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed data to every configured hash
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # bug fix: the error message used the stale loop variable 'k'
            # (left over from module scope) instead of the requested 'key'
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
215 215
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        chunk = self._fh.read(length)
        self._digester.update(chunk)
        self._got += len(chunk)
        return chunk

    def validate(self):
        # check the byte count first, then every configured digest
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
247 247
# 'buffer' builtin is gone in py3 (and may be missing elsewhere); provide a
# slicing-based replacement.  The py2 fallback copies the data; the py3 one
# uses memoryview for a zero-copy view like the original builtin.
try:
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        def buffer(sliceable, offset=0, length=None):
            # plain slicing: copies, but preserves the type
            if length is not None:
                return sliceable[offset:offset + length]
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0, length=None):
            # zero-copy view over the underlying buffer
            if length is not None:
                return memoryview(sliceable)[offset:offset + length]
            return memoryview(sliceable)[offset:]
261 261
# close inherited fds when spawning subprocesses (POSIX only)
closefds = pycompat.osname == 'posix'

# read granularity used by bufferedinputpipe._fillbuffer
_chunksize = 4096
265 265
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []   # list of buffered chunks, newest last
        self._eof = False   # set once os.read() returns no data
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # fill until 'size' bytes are available or the pipe is exhausted
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # keep reading until a newline shows up or we hit EOF
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        # collapse the leftover into a single chunk (readline relies on this)
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
359 359
def popen2(cmd, env=None, newlines=False):
    """Spawn cmd through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
370 370
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but without returning the process object."""
    in_, out, err, _proc = popen4(cmd, env, newlines)
    return in_, out, err
374 374
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn cmd through the shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
383 383
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
391 391
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # split the numeric "X.Y.Z" part from the first '+' or '-' suffix
    pieces = remod.split('[\+-]', v, 1)
    if len(pieces) == 1:
        numeric, extra = pieces[0], None
    else:
        numeric, extra = pieces

    vints = []
    for segment in numeric.split('.'):
        try:
            vints.append(int(segment))
        except ValueError:
            break
    # pad with None so (3, 6) behaves like (3, 6, None)
    vints.extend([None] * (3 - len(vints)))

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
460 460
# used by parsedate: candidate strptime() patterns, tried in this order
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M',    #   without seconds
    '%Y-%m-%dT%H%M%S',   # another awful but legal variant without :
    '%Y-%m-%dT%H%M',     #   without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M',    #   without seconds
    '%Y-%m-%d %H%M%S',   # without :
    '%Y-%m-%d %H%M',     #   without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S',  #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# extra, more ambiguous formats accepted where a date range is expected
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
502 502
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # single-slot cache for nullary functions
        memo = []
        def wrapper():
            if not memo:
                memo.append(func())
            return memo[0]
        return wrapper
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def wrapper(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def wrapper(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return wrapper
528 528
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves
    it to the end of that order.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # an existing key is moved to the end of the iteration order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # bug fix: return the popped value per the dict contract; the old
        # code discarded dict.pop()'s result and always returned None
        val = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned
            pass
        return val
    def keys(self):
        return self._list[:]
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an explicit position in the iteration order
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
577 577
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # key stays the _notset sentinel while the node is unused/emptied
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
596 596
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # start with a single self-linked node; more are added lazily
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # a read counts as an access: the node becomes the newest
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # NOTE: unlike __getitem__, a hit here does NOT refresh recency
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # empty the nodes but keep the allocated ring for reuse
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
755 755
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    maxsize = 20  # eviction threshold, matching the historical constant

    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                order.remove(arg)
            else:
                if len(cache) > maxsize:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > maxsize:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
782 782
class propertycache(object):
    """Non-data descriptor that computes a value once per instance.

    The wrapped function runs on first attribute access; the result is then
    stored in the instance __dict__, so subsequent lookups never reach the
    descriptor again.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        computed = self.func(obj)
        self.cachevalue(obj, computed)
        return computed

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
795 795
796 796 def pipefilter(s, cmd):
797 797 '''filter string S through command CMD, returning its output'''
798 798 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
799 799 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
800 800 pout, perr = p.communicate(s)
801 801 return pout
802 802
803 803 def tempfilter(s, cmd):
804 804 '''filter string S through a pair of temporary files with CMD.
805 805 CMD is used as a template to create the real command to be run,
806 806 with the strings INFILE and OUTFILE replaced by the real names of
807 807 the temporary files generated.'''
808 808 inname, outname = None, None
809 809 try:
810 810 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
811 811 fp = os.fdopen(infd, pycompat.sysstr('wb'))
812 812 fp.write(s)
813 813 fp.close()
814 814 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
815 815 os.close(outfd)
816 816 cmd = cmd.replace('INFILE', inname)
817 817 cmd = cmd.replace('OUTFILE', outname)
818 818 code = os.system(cmd)
819 819 if pycompat.sysplatform == 'OpenVMS' and code & 1:
820 820 code = 0
821 821 if code:
822 822 raise Abort(_("command '%s' failed: %s") %
823 823 (cmd, explainexit(code)))
824 824 return readfile(outname)
825 825 finally:
826 826 try:
827 827 if inname:
828 828 os.unlink(inname)
829 829 except OSError:
830 830 pass
831 831 try:
832 832 if outname:
833 833 os.unlink(outname)
834 834 except OSError:
835 835 pass
836 836
837 837 filtertable = {
838 838 'tempfile:': tempfilter,
839 839 'pipe:': pipefilter,
840 840 }
841 841
842 842 def filter(s, cmd):
843 843 "filter a string through a command that transforms its input to its output"
844 844 for name, fn in filtertable.iteritems():
845 845 if cmd.startswith(name):
846 846 return fn(s, cmd[len(name):].lstrip())
847 847 return pipefilter(s, cmd)
848 848
849 849 def binary(s):
850 850 """return true if a string is binary data"""
851 851 return bool(s and '\0' in s)
852 852
853 853 def increasingchunks(source, min=1024, max=65536):
854 854 '''return no less than min bytes per chunk while data remains,
855 855 doubling min after each chunk until it reaches max'''
856 856 def log2(x):
857 857 if not x:
858 858 return 0
859 859 i = 0
860 860 while x:
861 861 x >>= 1
862 862 i += 1
863 863 return i - 1
864 864
865 865 buf = []
866 866 blen = 0
867 867 for chunk in source:
868 868 buf.append(chunk)
869 869 blen += len(chunk)
870 870 if blen >= min:
871 871 if min < max:
872 872 min = min << 1
873 873 nmin = 1 << log2(blen)
874 874 if nmin > min:
875 875 min = nmin
876 876 if min > max:
877 877 min = max
878 878 yield ''.join(buf)
879 879 blen = 0
880 880 buf = []
881 881 if buf:
882 882 yield ''.join(buf)
883 883
884 884 Abort = error.Abort
885 885
886 886 def always(fn):
887 887 return True
888 888
889 889 def never(fn):
890 890 return False
891 891
892 892 def nogc(func):
893 893 """disable garbage collector
894 894
895 895 Python's garbage collector triggers a GC each time a certain number of
896 896 container objects (the number being defined by gc.get_threshold()) are
897 897 allocated even when marked not to be tracked by the collector. Tracking has
898 898 no effect on when GCs are triggered, only on what objects the GC looks
899 899 into. As a workaround, disable GC while building complex (huge)
900 900 containers.
901 901
902 902 This garbage collector issue have been fixed in 2.7.
903 903 """
904 904 if sys.version_info >= (2, 7):
905 905 return func
906 906 def wrapper(*args, **kwargs):
907 907 gcenabled = gc.isenabled()
908 908 gc.disable()
909 909 try:
910 910 return func(*args, **kwargs)
911 911 finally:
912 912 if gcenabled:
913 913 gc.enable()
914 914 return wrapper
915 915
916 916 def pathto(root, n1, n2):
917 917 '''return the relative path from one place to another.
918 918 root should use os.sep to separate directories
919 919 n1 should use os.sep to separate directories
920 920 n2 should use "/" to separate directories
921 921 returns an os.sep-separated path.
922 922
923 923 If n1 is a relative path, it's assumed it's
924 924 relative to root.
925 925 n2 should always be relative to root.
926 926 '''
927 927 if not n1:
928 928 return localpath(n2)
929 929 if os.path.isabs(n1):
930 930 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
931 931 return os.path.join(root, localpath(n2))
932 932 n2 = '/'.join((pconvert(root), n2))
933 933 a, b = splitpath(n1), n2.split('/')
934 934 a.reverse()
935 935 b.reverse()
936 936 while a and b and a[-1] == b[-1]:
937 937 a.pop()
938 938 b.pop()
939 939 b.reverse()
940 940 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
941 941
942 942 def mainfrozen():
943 943 """return True if we are a frozen executable.
944 944
945 945 The code supports py2exe (most common, Windows only) and tools/freeze
946 946 (portable, not much used).
947 947 """
948 948 return (safehasattr(sys, "frozen") or # new py2exe
949 949 safehasattr(sys, "importers") or # old py2exe
950 950 imp.is_frozen(u"__main__")) # tools/freeze
951 951
952 952 # the location of data files matching the source code
953 953 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
954 954 # executable version (py2exe) doesn't support __file__
955 955 datapath = os.path.dirname(pycompat.sysexecutable)
956 956 else:
957 957 datapath = os.path.dirname(pycompat.fsencode(__file__))
958 958
959 959 i18n.setdatapath(datapath)
960 960
961 961 _hgexecutable = None
962 962
963 963 def hgexecutable():
964 964 """return location of the 'hg' executable.
965 965
966 966 Defaults to $HG or 'hg' in the search path.
967 967 """
968 968 if _hgexecutable is None:
969 969 hg = encoding.environ.get('HG')
970 970 mainmod = sys.modules[pycompat.sysstr('__main__')]
971 971 if hg:
972 972 _sethgexecutable(hg)
973 973 elif mainfrozen():
974 974 if getattr(sys, 'frozen', None) == 'macosx_app':
975 975 # Env variable set by py2app
976 976 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
977 977 else:
978 978 _sethgexecutable(pycompat.sysexecutable)
979 979 elif (os.path.basename(
980 980 pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
981 981 _sethgexecutable(pycompat.fsencode(mainmod.__file__))
982 982 else:
983 983 exe = findexe('hg') or os.path.basename(sys.argv[0])
984 984 _sethgexecutable(exe)
985 985 return _hgexecutable
986 986
987 987 def _sethgexecutable(path):
988 988 """set location of the 'hg' executable"""
989 989 global _hgexecutable
990 990 _hgexecutable = path
991 991
992 992 def _isstdout(f):
993 993 fileno = getattr(f, 'fileno', None)
994 994 return fileno and fileno() == sys.__stdout__.fileno()
995 995
996 996 def shellenviron(environ=None):
997 997 """return environ with optional override, useful for shelling out"""
998 998 def py2shell(val):
999 999 'convert python object into string that is useful to shell'
1000 1000 if val is None or val is False:
1001 1001 return '0'
1002 1002 if val is True:
1003 1003 return '1'
1004 1004 return str(val)
1005 1005 env = dict(encoding.environ)
1006 1006 if environ:
1007 1007 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1008 1008 env['HG'] = hgexecutable()
1009 1009 return env
1010 1010
1011 1011 def system(cmd, environ=None, cwd=None, out=None):
1012 1012 '''enhanced shell command execution.
1013 1013 run with environment maybe modified, maybe in different dir.
1014 1014
1015 1015 if out is specified, it is assumed to be a file-like object that has a
1016 1016 write() method. stdout and stderr will be redirected to out.'''
1017 1017 try:
1018 1018 stdout.flush()
1019 1019 except Exception:
1020 1020 pass
1021 1021 cmd = quotecommand(cmd)
1022 1022 if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
1023 1023 and sys.version_info[1] < 7):
1024 1024 # subprocess kludge to work around issues in half-baked Python
1025 1025 # ports, notably bichued/python:
1026 1026 if not cwd is None:
1027 1027 os.chdir(cwd)
1028 1028 rc = os.system(cmd)
1029 1029 else:
1030 1030 env = shellenviron(environ)
1031 1031 if out is None or _isstdout(out):
1032 1032 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1033 1033 env=env, cwd=cwd)
1034 1034 else:
1035 1035 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1036 1036 env=env, cwd=cwd, stdout=subprocess.PIPE,
1037 1037 stderr=subprocess.STDOUT)
1038 1038 for line in iter(proc.stdout.readline, ''):
1039 1039 out.write(line)
1040 1040 proc.wait()
1041 1041 rc = proc.returncode
1042 1042 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1043 1043 rc = 0
1044 1044 return rc
1045 1045
1046 1046 def checksignature(func):
1047 1047 '''wrap a function with code to check for calling errors'''
1048 1048 def check(*args, **kwargs):
1049 1049 try:
1050 1050 return func(*args, **kwargs)
1051 1051 except TypeError:
1052 1052 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1053 1053 raise error.SignatureError
1054 1054 raise
1055 1055
1056 1056 return check
1057 1057
1058 1058 # a whilelist of known filesystems where hardlink works reliably
1059 1059 _hardlinkfswhitelist = set([
1060 1060 'btrfs',
1061 1061 'ext2',
1062 1062 'ext3',
1063 1063 'ext4',
1064 1064 'hfs',
1065 1065 'jfs',
1066 1066 'reiserfs',
1067 1067 'tmpfs',
1068 1068 'ufs',
1069 1069 'xfs',
1070 1070 'zfs',
1071 1071 ])
1072 1072
1073 1073 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1074 1074 '''copy a file, preserving mode and optionally other stat info like
1075 1075 atime/mtime
1076 1076
1077 1077 checkambig argument is used with filestat, and is useful only if
1078 1078 destination file is guarded by any lock (e.g. repo.lock or
1079 1079 repo.wlock).
1080 1080
1081 1081 copystat and checkambig should be exclusive.
1082 1082 '''
1083 1083 assert not (copystat and checkambig)
1084 1084 oldstat = None
1085 1085 if os.path.lexists(dest):
1086 1086 if checkambig:
1087 1087 oldstat = checkambig and filestat(dest)
1088 1088 unlink(dest)
1089 1089 if hardlink:
1090 1090 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1091 1091 # unless we are confident that dest is on a whitelisted filesystem.
1092 fstype = getfstype(os.path.dirname(dest))
1092 try:
1093 fstype = getfstype(os.path.dirname(dest))
1094 except OSError:
1095 fstype = None
1093 1096 if fstype not in _hardlinkfswhitelist:
1094 1097 hardlink = False
1095 1098 if hardlink:
1096 1099 try:
1097 1100 oslink(src, dest)
1098 1101 return
1099 1102 except (IOError, OSError):
1100 1103 pass # fall back to normal copy
1101 1104 if os.path.islink(src):
1102 1105 os.symlink(os.readlink(src), dest)
1103 1106 # copytime is ignored for symlinks, but in general copytime isn't needed
1104 1107 # for them anyway
1105 1108 else:
1106 1109 try:
1107 1110 shutil.copyfile(src, dest)
1108 1111 if copystat:
1109 1112 # copystat also copies mode
1110 1113 shutil.copystat(src, dest)
1111 1114 else:
1112 1115 shutil.copymode(src, dest)
1113 1116 if oldstat and oldstat.stat:
1114 1117 newstat = filestat(dest)
1115 1118 if newstat.isambig(oldstat):
1116 1119 # stat of copied file is ambiguous to original one
1117 1120 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1118 1121 os.utime(dest, (advanced, advanced))
1119 1122 except shutil.Error as inst:
1120 1123 raise Abort(str(inst))
1121 1124
1122 1125 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1123 1126 """Copy a directory tree using hardlinks if possible."""
1124 1127 num = 0
1125 1128
1126 1129 if hardlink is None:
1127 1130 hardlink = (os.stat(src).st_dev ==
1128 1131 os.stat(os.path.dirname(dst)).st_dev)
1129 1132 if hardlink:
1130 1133 topic = _('linking')
1131 1134 else:
1132 1135 topic = _('copying')
1133 1136
1134 1137 if os.path.isdir(src):
1135 1138 os.mkdir(dst)
1136 1139 for name, kind in osutil.listdir(src):
1137 1140 srcname = os.path.join(src, name)
1138 1141 dstname = os.path.join(dst, name)
1139 1142 def nprog(t, pos):
1140 1143 if pos is not None:
1141 1144 return progress(t, pos + num)
1142 1145 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1143 1146 num += n
1144 1147 else:
1145 1148 if hardlink:
1146 1149 try:
1147 1150 oslink(src, dst)
1148 1151 except (IOError, OSError):
1149 1152 hardlink = False
1150 1153 shutil.copy(src, dst)
1151 1154 else:
1152 1155 shutil.copy(src, dst)
1153 1156 num += 1
1154 1157 progress(topic, num)
1155 1158 progress(topic, None)
1156 1159
1157 1160 return hardlink, num
1158 1161
1159 1162 _winreservednames = '''con prn aux nul
1160 1163 com1 com2 com3 com4 com5 com6 com7 com8 com9
1161 1164 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1162 1165 _winreservedchars = ':*?"<>|'
1163 1166 def checkwinfilename(path):
1164 1167 r'''Check that the base-relative path is a valid filename on Windows.
1165 1168 Returns None if the path is ok, or a UI string describing the problem.
1166 1169
1167 1170 >>> checkwinfilename("just/a/normal/path")
1168 1171 >>> checkwinfilename("foo/bar/con.xml")
1169 1172 "filename contains 'con', which is reserved on Windows"
1170 1173 >>> checkwinfilename("foo/con.xml/bar")
1171 1174 "filename contains 'con', which is reserved on Windows"
1172 1175 >>> checkwinfilename("foo/bar/xml.con")
1173 1176 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1174 1177 "filename contains 'AUX', which is reserved on Windows"
1175 1178 >>> checkwinfilename("foo/bar/bla:.txt")
1176 1179 "filename contains ':', which is reserved on Windows"
1177 1180 >>> checkwinfilename("foo/bar/b\07la.txt")
1178 1181 "filename contains '\\x07', which is invalid on Windows"
1179 1182 >>> checkwinfilename("foo/bar/bla ")
1180 1183 "filename ends with ' ', which is not allowed on Windows"
1181 1184 >>> checkwinfilename("../bar")
1182 1185 >>> checkwinfilename("foo\\")
1183 1186 "filename ends with '\\', which is invalid on Windows"
1184 1187 >>> checkwinfilename("foo\\/bar")
1185 1188 "directory name ends with '\\', which is invalid on Windows"
1186 1189 '''
1187 1190 if path.endswith('\\'):
1188 1191 return _("filename ends with '\\', which is invalid on Windows")
1189 1192 if '\\/' in path:
1190 1193 return _("directory name ends with '\\', which is invalid on Windows")
1191 1194 for n in path.replace('\\', '/').split('/'):
1192 1195 if not n:
1193 1196 continue
1194 1197 for c in pycompat.bytestr(n):
1195 1198 if c in _winreservedchars:
1196 1199 return _("filename contains '%s', which is reserved "
1197 1200 "on Windows") % c
1198 1201 if ord(c) <= 31:
1199 1202 return _("filename contains %r, which is invalid "
1200 1203 "on Windows") % c
1201 1204 base = n.split('.')[0]
1202 1205 if base and base.lower() in _winreservednames:
1203 1206 return _("filename contains '%s', which is reserved "
1204 1207 "on Windows") % base
1205 1208 t = n[-1]
1206 1209 if t in '. ' and n not in '..':
1207 1210 return _("filename ends with '%s', which is not allowed "
1208 1211 "on Windows") % t
1209 1212
1210 1213 if pycompat.osname == 'nt':
1211 1214 checkosfilename = checkwinfilename
1212 1215 timer = time.clock
1213 1216 else:
1214 1217 checkosfilename = platform.checkosfilename
1215 1218 timer = time.time
1216 1219
1217 1220 if safehasattr(time, "perf_counter"):
1218 1221 timer = time.perf_counter
1219 1222
1220 1223 def makelock(info, pathname):
1221 1224 try:
1222 1225 return os.symlink(info, pathname)
1223 1226 except OSError as why:
1224 1227 if why.errno == errno.EEXIST:
1225 1228 raise
1226 1229 except AttributeError: # no symlink in os
1227 1230 pass
1228 1231
1229 1232 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1230 1233 os.write(ld, info)
1231 1234 os.close(ld)
1232 1235
1233 1236 def readlock(pathname):
1234 1237 try:
1235 1238 return os.readlink(pathname)
1236 1239 except OSError as why:
1237 1240 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1238 1241 raise
1239 1242 except AttributeError: # no symlink in os
1240 1243 pass
1241 1244 fp = posixfile(pathname)
1242 1245 r = fp.read()
1243 1246 fp.close()
1244 1247 return r
1245 1248
1246 1249 def fstat(fp):
1247 1250 '''stat file object that may not have fileno method.'''
1248 1251 try:
1249 1252 return os.fstat(fp.fileno())
1250 1253 except AttributeError:
1251 1254 return os.stat(fp.name)
1252 1255
1253 1256 # File system features
1254 1257
1255 1258 def fscasesensitive(path):
1256 1259 """
1257 1260 Return true if the given path is on a case-sensitive filesystem
1258 1261
1259 1262 Requires a path (like /foo/.hg) ending with a foldable final
1260 1263 directory component.
1261 1264 """
1262 1265 s1 = os.lstat(path)
1263 1266 d, b = os.path.split(path)
1264 1267 b2 = b.upper()
1265 1268 if b == b2:
1266 1269 b2 = b.lower()
1267 1270 if b == b2:
1268 1271 return True # no evidence against case sensitivity
1269 1272 p2 = os.path.join(d, b2)
1270 1273 try:
1271 1274 s2 = os.lstat(p2)
1272 1275 if s2 == s1:
1273 1276 return False
1274 1277 return True
1275 1278 except OSError:
1276 1279 return True
1277 1280
1278 1281 try:
1279 1282 import re2
1280 1283 _re2 = None
1281 1284 except ImportError:
1282 1285 _re2 = False
1283 1286
1284 1287 class _re(object):
1285 1288 def _checkre2(self):
1286 1289 global _re2
1287 1290 try:
1288 1291 # check if match works, see issue3964
1289 1292 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1290 1293 except ImportError:
1291 1294 _re2 = False
1292 1295
1293 1296 def compile(self, pat, flags=0):
1294 1297 '''Compile a regular expression, using re2 if possible
1295 1298
1296 1299 For best performance, use only re2-compatible regexp features. The
1297 1300 only flags from the re module that are re2-compatible are
1298 1301 IGNORECASE and MULTILINE.'''
1299 1302 if _re2 is None:
1300 1303 self._checkre2()
1301 1304 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1302 1305 if flags & remod.IGNORECASE:
1303 1306 pat = '(?i)' + pat
1304 1307 if flags & remod.MULTILINE:
1305 1308 pat = '(?m)' + pat
1306 1309 try:
1307 1310 return re2.compile(pat)
1308 1311 except re2.error:
1309 1312 pass
1310 1313 return remod.compile(pat, flags)
1311 1314
1312 1315 @propertycache
1313 1316 def escape(self):
1314 1317 '''Return the version of escape corresponding to self.compile.
1315 1318
1316 1319 This is imperfect because whether re2 or re is used for a particular
1317 1320 function depends on the flags, etc, but it's the best we can do.
1318 1321 '''
1319 1322 global _re2
1320 1323 if _re2 is None:
1321 1324 self._checkre2()
1322 1325 if _re2:
1323 1326 return re2.escape
1324 1327 else:
1325 1328 return remod.escape
1326 1329
1327 1330 re = _re()
1328 1331
1329 1332 _fspathcache = {}
1330 1333 def fspath(name, root):
1331 1334 '''Get name in the case stored in the filesystem
1332 1335
1333 1336 The name should be relative to root, and be normcase-ed for efficiency.
1334 1337
1335 1338 Note that this function is unnecessary, and should not be
1336 1339 called, for case-sensitive filesystems (simply because it's expensive).
1337 1340
1338 1341 The root should be normcase-ed, too.
1339 1342 '''
1340 1343 def _makefspathcacheentry(dir):
1341 1344 return dict((normcase(n), n) for n in os.listdir(dir))
1342 1345
1343 1346 seps = pycompat.ossep
1344 1347 if pycompat.osaltsep:
1345 1348 seps = seps + pycompat.osaltsep
1346 1349 # Protect backslashes. This gets silly very quickly.
1347 1350 seps.replace('\\','\\\\')
1348 1351 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
1349 1352 dir = os.path.normpath(root)
1350 1353 result = []
1351 1354 for part, sep in pattern.findall(name):
1352 1355 if sep:
1353 1356 result.append(sep)
1354 1357 continue
1355 1358
1356 1359 if dir not in _fspathcache:
1357 1360 _fspathcache[dir] = _makefspathcacheentry(dir)
1358 1361 contents = _fspathcache[dir]
1359 1362
1360 1363 found = contents.get(part)
1361 1364 if not found:
1362 1365 # retry "once per directory" per "dirstate.walk" which
1363 1366 # may take place for each patches of "hg qpush", for example
1364 1367 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1365 1368 found = contents.get(part)
1366 1369
1367 1370 result.append(found or part)
1368 1371 dir = os.path.join(dir, part)
1369 1372
1370 1373 return ''.join(result)
1371 1374
1372 1375 def getfstype(dirpath):
1373 1376 '''Get the filesystem type name from a directory (best-effort)
1374 1377
1375 Returns None if we are unsure, or errors like ENOENT, EPERM happen.
1378 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
1376 1379 '''
1377 1380 return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
1378 1381
1379 1382 def checknlink(testfile):
1380 1383 '''check whether hardlink count reporting works properly'''
1381 1384
1382 1385 # testfile may be open, so we need a separate file for checking to
1383 1386 # work around issue2543 (or testfile may get lost on Samba shares)
1384 1387 f1 = testfile + ".hgtmp1"
1385 1388 if os.path.lexists(f1):
1386 1389 return False
1387 1390 try:
1388 1391 posixfile(f1, 'w').close()
1389 1392 except IOError:
1390 1393 try:
1391 1394 os.unlink(f1)
1392 1395 except OSError:
1393 1396 pass
1394 1397 return False
1395 1398
1396 1399 f2 = testfile + ".hgtmp2"
1397 1400 fd = None
1398 1401 try:
1399 1402 oslink(f1, f2)
1400 1403 # nlinks() may behave differently for files on Windows shares if
1401 1404 # the file is open.
1402 1405 fd = posixfile(f2)
1403 1406 return nlinks(f2) > 1
1404 1407 except OSError:
1405 1408 return False
1406 1409 finally:
1407 1410 if fd is not None:
1408 1411 fd.close()
1409 1412 for f in (f1, f2):
1410 1413 try:
1411 1414 os.unlink(f)
1412 1415 except OSError:
1413 1416 pass
1414 1417
1415 1418 def endswithsep(path):
1416 1419 '''Check path ends with os.sep or os.altsep.'''
1417 1420 return (path.endswith(pycompat.ossep)
1418 1421 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
1419 1422
1420 1423 def splitpath(path):
1421 1424 '''Split path by os.sep.
1422 1425 Note that this function does not use os.altsep because this is
1423 1426 an alternative of simple "xxx.split(os.sep)".
1424 1427 It is recommended to use os.path.normpath() before using this
1425 1428 function if need.'''
1426 1429 return path.split(pycompat.ossep)
1427 1430
1428 1431 def gui():
1429 1432 '''Are we running in a GUI?'''
1430 1433 if pycompat.sysplatform == 'darwin':
1431 1434 if 'SSH_CONNECTION' in encoding.environ:
1432 1435 # handle SSH access to a box where the user is logged in
1433 1436 return False
1434 1437 elif getattr(osutil, 'isgui', None):
1435 1438 # check if a CoreGraphics session is available
1436 1439 return osutil.isgui()
1437 1440 else:
1438 1441 # pure build; use a safe default
1439 1442 return True
1440 1443 else:
1441 1444 return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
1442 1445
1443 1446 def mktempcopy(name, emptyok=False, createmode=None):
1444 1447 """Create a temporary file with the same contents from name
1445 1448
1446 1449 The permission bits are copied from the original file.
1447 1450
1448 1451 If the temporary file is going to be truncated immediately, you
1449 1452 can use emptyok=True as an optimization.
1450 1453
1451 1454 Returns the name of the temporary file.
1452 1455 """
1453 1456 d, fn = os.path.split(name)
1454 1457 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1455 1458 os.close(fd)
1456 1459 # Temporary files are created with mode 0600, which is usually not
1457 1460 # what we want. If the original file already exists, just copy
1458 1461 # its mode. Otherwise, manually obey umask.
1459 1462 copymode(name, temp, createmode)
1460 1463 if emptyok:
1461 1464 return temp
1462 1465 try:
1463 1466 try:
1464 1467 ifp = posixfile(name, "rb")
1465 1468 except IOError as inst:
1466 1469 if inst.errno == errno.ENOENT:
1467 1470 return temp
1468 1471 if not getattr(inst, 'filename', None):
1469 1472 inst.filename = name
1470 1473 raise
1471 1474 ofp = posixfile(temp, "wb")
1472 1475 for chunk in filechunkiter(ifp):
1473 1476 ofp.write(chunk)
1474 1477 ifp.close()
1475 1478 ofp.close()
1476 1479 except: # re-raises
1477 1480 try: os.unlink(temp)
1478 1481 except OSError: pass
1479 1482 raise
1480 1483 return temp
1481 1484
1482 1485 class filestat(object):
1483 1486 """help to exactly detect change of a file
1484 1487
1485 1488 'stat' attribute is result of 'os.stat()' if specified 'path'
1486 1489 exists. Otherwise, it is None. This can avoid preparative
1487 1490 'exists()' examination on client side of this class.
1488 1491 """
1489 1492 def __init__(self, path):
1490 1493 try:
1491 1494 self.stat = os.stat(path)
1492 1495 except OSError as err:
1493 1496 if err.errno != errno.ENOENT:
1494 1497 raise
1495 1498 self.stat = None
1496 1499
1497 1500 __hash__ = object.__hash__
1498 1501
1499 1502 def __eq__(self, old):
1500 1503 try:
1501 1504 # if ambiguity between stat of new and old file is
1502 1505 # avoided, comparison of size, ctime and mtime is enough
1503 1506 # to exactly detect change of a file regardless of platform
1504 1507 return (self.stat.st_size == old.stat.st_size and
1505 1508 self.stat.st_ctime == old.stat.st_ctime and
1506 1509 self.stat.st_mtime == old.stat.st_mtime)
1507 1510 except AttributeError:
1508 1511 return False
1509 1512
1510 1513 def isambig(self, old):
1511 1514 """Examine whether new (= self) stat is ambiguous against old one
1512 1515
1513 1516 "S[N]" below means stat of a file at N-th change:
1514 1517
1515 1518 - S[n-1].ctime < S[n].ctime: can detect change of a file
1516 1519 - S[n-1].ctime == S[n].ctime
1517 1520 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1518 1521 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1519 1522 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1520 1523 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1521 1524
1522 1525 Case (*2) above means that a file was changed twice or more at
1523 1526 same time in sec (= S[n-1].ctime), and comparison of timestamp
1524 1527 is ambiguous.
1525 1528
1526 1529 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
1527 1530 timestamp is ambiguous".
1528 1531
1529 1532 But advancing mtime only in case (*2) doesn't work as
1530 1533 expected, because naturally advanced S[n].mtime in case (*1)
1531 1534 might be equal to manually advanced S[n-1 or earlier].mtime.
1532 1535
1533 1536 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1534 1537 treated as ambiguous regardless of mtime, to avoid overlooking
1535 1538 by confliction between such mtime.
1536 1539
1537 1540 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1538 1541 S[n].mtime", even if size of a file isn't changed.
1539 1542 """
1540 1543 try:
1541 1544 return (self.stat.st_ctime == old.stat.st_ctime)
1542 1545 except AttributeError:
1543 1546 return False
1544 1547
1545 1548 def avoidambig(self, path, old):
1546 1549 """Change file stat of specified path to avoid ambiguity
1547 1550
1548 1551 'old' should be previous filestat of 'path'.
1549 1552
1550 1553 This skips avoiding ambiguity, if a process doesn't have
1551 1554 appropriate privileges for 'path'.
1552 1555 """
1553 1556 advanced = (old.stat.st_mtime + 1) & 0x7fffffff
1554 1557 try:
1555 1558 os.utime(path, (advanced, advanced))
1556 1559 except OSError as inst:
1557 1560 if inst.errno == errno.EPERM:
1558 1561 # utime() on the file created by another user causes EPERM,
1559 1562 # if a process doesn't have appropriate privileges
1560 1563 return
1561 1564 raise
1562 1565
1563 1566 def __ne__(self, other):
1564 1567 return not self == other
1565 1568
1566 1569 class atomictempfile(object):
1567 1570 '''writable file object that atomically updates a file
1568 1571
1569 1572 All writes will go to a temporary copy of the original file. Call
1570 1573 close() when you are done writing, and atomictempfile will rename
1571 1574 the temporary copy to the original name, making the changes
1572 1575 visible. If the object is destroyed without being closed, all your
1573 1576 writes are discarded.
1574 1577
1575 1578 checkambig argument of constructor is used with filestat, and is
1576 1579 useful only if target file is guarded by any lock (e.g. repo.lock
1577 1580 or repo.wlock).
1578 1581 '''
1579 1582 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1580 1583 self.__name = name # permanent name
1581 1584 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1582 1585 createmode=createmode)
1583 1586 self._fp = posixfile(self._tempname, mode)
1584 1587 self._checkambig = checkambig
1585 1588
1586 1589 # delegated methods
1587 1590 self.read = self._fp.read
1588 1591 self.write = self._fp.write
1589 1592 self.seek = self._fp.seek
1590 1593 self.tell = self._fp.tell
1591 1594 self.fileno = self._fp.fileno
1592 1595
1593 1596 def close(self):
1594 1597 if not self._fp.closed:
1595 1598 self._fp.close()
1596 1599 filename = localpath(self.__name)
1597 1600 oldstat = self._checkambig and filestat(filename)
1598 1601 if oldstat and oldstat.stat:
1599 1602 rename(self._tempname, filename)
1600 1603 newstat = filestat(filename)
1601 1604 if newstat.isambig(oldstat):
1602 1605 # stat of changed file is ambiguous to original one
1603 1606 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1604 1607 os.utime(filename, (advanced, advanced))
1605 1608 else:
1606 1609 rename(self._tempname, filename)
1607 1610
1608 1611 def discard(self):
1609 1612 if not self._fp.closed:
1610 1613 try:
1611 1614 os.unlink(self._tempname)
1612 1615 except OSError:
1613 1616 pass
1614 1617 self._fp.close()
1615 1618
1616 1619 def __del__(self):
1617 1620 if safehasattr(self, '_fp'): # constructor actually did something
1618 1621 self.discard()
1619 1622
1620 1623 def __enter__(self):
1621 1624 return self
1622 1625
1623 1626 def __exit__(self, exctype, excvalue, traceback):
1624 1627 if exctype is not None:
1625 1628 self.discard()
1626 1629 else:
1627 1630 self.close()
1628 1631
1629 1632 def unlinkpath(f, ignoremissing=False):
1630 1633 """unlink and remove the directory if it is empty"""
1631 1634 if ignoremissing:
1632 1635 tryunlink(f)
1633 1636 else:
1634 1637 unlink(f)
1635 1638 # try removing directories that might now be empty
1636 1639 try:
1637 1640 removedirs(os.path.dirname(f))
1638 1641 except OSError:
1639 1642 pass
1640 1643
1641 1644 def tryunlink(f):
1642 1645 """Attempt to remove a file, ignoring ENOENT errors."""
1643 1646 try:
1644 1647 unlink(f)
1645 1648 except OSError as e:
1646 1649 if e.errno != errno.ENOENT:
1647 1650 raise
1648 1651
1649 1652 def makedirs(name, mode=None, notindexed=False):
1650 1653 """recursive directory creation with parent mode inheritance
1651 1654
1652 1655 Newly created directories are marked as "not to be indexed by
1653 1656 the content indexing service", if ``notindexed`` is specified
1654 1657 for "write" mode access.
1655 1658 """
1656 1659 try:
1657 1660 makedir(name, notindexed)
1658 1661 except OSError as err:
1659 1662 if err.errno == errno.EEXIST:
1660 1663 return
1661 1664 if err.errno != errno.ENOENT or not name:
1662 1665 raise
1663 1666 parent = os.path.dirname(os.path.abspath(name))
1664 1667 if parent == name:
1665 1668 raise
1666 1669 makedirs(parent, mode, notindexed)
1667 1670 try:
1668 1671 makedir(name, notindexed)
1669 1672 except OSError as err:
1670 1673 # Catch EEXIST to handle races
1671 1674 if err.errno == errno.EEXIST:
1672 1675 return
1673 1676 raise
1674 1677 if mode is not None:
1675 1678 os.chmod(name, mode)
1676 1679
1677 1680 def readfile(path):
1678 1681 with open(path, 'rb') as fp:
1679 1682 return fp.read()
1680 1683
1681 1684 def writefile(path, text):
1682 1685 with open(path, 'wb') as fp:
1683 1686 fp.write(text)
1684 1687
1685 1688 def appendfile(path, text):
1686 1689 with open(path, 'ab') as fp:
1687 1690 fp.write(text)
1688 1691
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        # Split any chunk larger than 1 MB into 256 kB pieces so a single
        # huge chunk cannot be pinned whole while it is consumed piecemeal.
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # _queue buffers chunks pulled from the iterator but not yet
        # returned; _chunkoffset counts how many bytes of _queue[0] have
        # already been handed out.
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1769 1772
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # A zero byte budget short-circuits without touching the file.
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
1790 1793
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # The difference between naive-UTC and naive-local renderings of the
    # same instant is the zone offset in effect at that moment.
    localdelta = (datetime.datetime.utcfromtimestamp(timestamp) -
                  datetime.datetime.fromtimestamp(timestamp))
    offset = localdelta.days * 86400 + localdelta.seconds
    return timestamp, offset
1803 1806
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    In addition to strftime directives, the format may contain %1 (the
    signed hour part of the offset), %2 (the minute part) and %z
    (shorthand for %1%2).

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # A positive offset means west of UTC, rendered with a '-' sign.
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # Clamp to the signed 32-bit range so the datetime below stays
    # representable (matches the doctests above).
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
    return s
1839 1842
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1843 1846
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair

    The offset is in seconds west of UTC (so "+0500" yields -18000);
    it is None when no timezone is recognized."""
    # Named aliases for UTC.
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        polarity = 1 if s[-5] == "+" else -1
        zh = int(s[-4:-2])
        zm = int(s[-2:])
        return -polarity * (zh * 60 + zm) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        polarity = 1 if s[-6] == "+" else -1
        zh = int(s[-5:-3])
        zm = int(s[-2:])
        return -polarity * (zh * 60 + zm) * 60, s[:-6]

    return None, s
1871 1874
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps each element group ("S", "M", "HI", "d", "mb",
    "yY") to a (biased, today) pair of fallback strings; parsedate()
    shows how such a table is built.
    """
    if defaults is None:
        defaults = {}

    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # Append the fallback value and a matching directive so
            # strptime still sees a fully-specified string.
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1902 1905
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # Symbolic dates, matched in both English and the local translation.
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    # First try the raw internal form: "unixtime offset".
    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # Try each candidate format until one parses (for-else: no
        # format matched at all).
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1979 1982
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # Bias unspecified fields to their minimum: the earliest moment
        # the given spec could mean.
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # Bias unspecified fields to their maximum: the latest moment the
        # spec could mean. Try month lengths from longest to shortest
        # (parsedate aborts on an impossible day-of-month).
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days.
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # Inclusive range between two specs.
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # A bare spec matches the whole interval it denotes.
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
2055 2058
def stringmatcher(pattern, casesensitive=True):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
    >>> def itest(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])

    case insensitive regex matches
    >>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
    ('re', 'A.+b', [False, False, True])

    case insensitive literal matches
    >>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
    ('literal', 'ABCDEFG', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            flags = 0
            if not casesensitive:
                flags = remod.I
            regex = remod.compile(pattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        # Strip the prefix; the remainder is matched literally below.
        pattern = pattern[8:]

    match = pattern.__eq__

    if not casesensitive:
        # Case-fold both sides with the local encoding's lower().
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match
2114 2117
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # Truncate at the first '@', keep what follows the first '<', then
    # truncate again at the first space and the first dot.
    user = user.split('@', 1)[0]
    user = user.split('<', 1)[-1]
    user = user.split(' ', 1)[0]
    user = user.split('.', 1)[0]
    return user
2130 2133
def emailuser(user):
    """Return the user portion of an email address."""
    # Drop the domain, then drop any "Real Name <" prefix.
    user = user.split('@', 1)[0]
    user = user.split('<', 1)[-1]
    return user
2140 2143
def email(author):
    '''get email of author.'''
    # Take the text between the first '<' and the first '>'; when either
    # bracket is missing, fall back to the corresponding string end.
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        end = None
    return author[start:end]
2147 2150
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # Column-aware trimming (rather than byte slicing) is delegated to
    # encoding.trim, which appends '...' when it shortens the text.
    return encoding.trim(text, maxlength, ellipsis='...')
2151 2154
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable rows are (multiplier, divisor, format) triples ordered from
    the largest unit to the smallest.'''

    def render(count):
        # Use the first unit whose threshold (divisor * multiplier) the
        # count reaches.
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # Tiny values fall back to the smallest unit's format.
        return unittable[-1][2] % count

    return render
2162 2165
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
    ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
    ...
    ParseError: fromline must be strictly positive
    """
    if toline < fromline:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    # Convert the 1-based inclusive spec into a 0-based half-open range.
    return fromline - 1, toline
2183 2186
# Render a byte count using the coarsest unit that keeps the number
# readable, with precision shrinking as the value grows (e.g. '%.2f GB'
# just above 1 GB, '%.0f GB' at 100 GB and beyond).
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2196 2199
def escapestr(s):
    """Backslash-escape a byte string (the string-escape codec).

    codecs.escape_encode is the underlying implementation of
    s.encode('string_escape'); it is called directly because that codec
    name does not exist on Python 3.
    """
    encoded, _consumed = codecs.escape_encode(s)
    return encoded
2201 2204
def unescapestr(s):
    """Reverse escapestr(): decode backslash escapes in a byte string."""
    decoded, _consumed = codecs.escape_decode(s)
    return decoded
2204 2207
def uirepr(s):
    """repr() variant for user-facing output.

    Collapses doubled backslashes so Windows paths stay readable."""
    text = repr(s)
    return text.replace('\\\\', '\\')
2208 2211
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Factory for a width-aware TextWrapper; replaces itself with the
    subclass after the first call (see the ``global`` trick at the end)."""
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr so the first part occupies at most space_left
            # display columns (per encoding.ucolwidth).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Memoize: rebind the module-level name to the subclass so later
    # calls skip redefining the class and construct instances directly.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2312 2315
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap ``line`` to at most ``width`` display columns.

    ``initindent`` prefixes the first output line, ``hangindent`` the
    following ones. Inputs are byte strings in the local encoding; the
    result is re-encoded before returning.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(pycompat.sysstr(encoding.encoding),
                       pycompat.sysstr(encoding.encodingmode))
    initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
2328 2331
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            # Read raw bytes via os.read, retry on EINTR, and reassemble
            # lines by hand; a trailing partial line is yielded at EOF.
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        """Return an iterator over fp's lines that survives EINTR.

        Regular on-disk files take the fast path (fp itself); other file
        types go through the _safeiterfile workaround above."""
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        """Return an iterator over fp's lines (no EINTR workaround needed)."""
        return fp
2400 2403
def iterlines(iterator):
    """Yield each individual line from an iterable of text chunks."""
    for block in iterator:
        lines = block.splitlines()
        for line in lines:
            yield line
2405 2408
def expandpath(path):
    """Expand environment variables, then ~user constructs, in path."""
    path = os.path.expandvars(path)
    return os.path.expanduser(path)
2408 2411
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [encoding.environ['EXECUTABLEPATH']]
        else:
            # Other frozen builds: run the current executable directly.
            return [pycompat.sysexecutable]
    # Not frozen: delegate to the platform-specific helper.
    return gethgcmd()
2423 2426
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() returns a (pid, status) pair; keep only the pid so
        # the ``pid in terminated`` membership test below can match.
        terminated.add(os.wait()[0])
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # Restore the previous SIGCHLD disposition.
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2458 2461
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    # NOTE(review): mapping keys are joined into the regex unescaped, so
    # keys must not contain regex metacharacters -- confirm at call sites.
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # Allow a doubled prefix to stand for a literal prefix character.
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2483 2486
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # Not numeric: treat it as a service name.
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2500 2503
# Recognized spellings for boolean configuration values.
_booleans = {
    '1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
    '0': False, 'no': False, 'false': False, 'off': False, 'never': False,
}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2511 2514
2512 2515 _hextochr = dict((a + b, chr(int(a + b, 16)))
2513 2516 for a in string.hexdigits for b in string.hexdigits)
2514 2517
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url('')
    <url path: ''>
    >>> url('#a')
    <url path: '', fragment: 'a'>
    >>> url('http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url('http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url('http:')
    <url scheme: 'http'>
    """

    # characters left unescaped when quoting userinfo components
    _safechars = "!~*'()+"
    # characters left unescaped when quoting path/fragment components
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        # general case: chomp the "<scheme>:" prefix off the front
        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        return encoding.strfromlocal(self.__bytes__())

    def __bytes__(self):
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # Temporarily strip credentials so str(self) yields a URI
        # suitable for the password manager, then restore them.
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        # "absolute" here means: cannot be joined onto a base path
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2818 2821
def hasscheme(path):
    """Report whether path parses with a URL scheme (e.g. 'http://...')."""
    parsed = url(path)
    return bool(parsed.scheme)
2821 2824
def hasdriveletter(path):
    """Report whether path starts with a Windows drive letter ('c:...').

    An empty/falsy path short-circuits to the path value itself.
    """
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2824 2827
def urllocalpath(path):
    """Return the local filesystem path for a URL-ish path string."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2827 2830
def hidepassword(u):
    """Return the url string with any password replaced by '***'."""
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2834 2837
def removeauth(u):
    """Return the url string stripped of user name and password."""
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2840 2843
# Human-readable duration formatter covering s/ms/us/ns. Each entry is
# presumably (threshold, unit multiplier, format) consumed by unitcountfn
# (defined elsewhere in this file) to pick the coarsest fitting unit --
# NOTE(review): confirm against unitcountfn's contract.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2856 2859
# Current indentation level for nested @timed reports.
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = timer()
        step = 2
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = timer() - begin
            _timenesting[0] -= step
            # indent proportionally to the remaining nesting depth
            report = '%s%s: %s\n' % (' ' * _timenesting[0],
                                     func.__name__, timecount(duration))
            stderr.write(report)
    return wrapper
2883 2886
# Recognized size suffixes; single letters first, but 'b' last so that
# 'kb'/'mb'/'gb' are matched before the bare-bytes suffix.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        # non-numeric magnitude; report the original user input
        raise error.ParseError(_("couldn't parse size: %s") % s)
2905 2908
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, callable) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so invocation order is deterministic
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _source, fn in self._hooks]
2923 2926
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fname, lineno), funcname)
               for fname, lineno, funcname, _text in frames]
    # NB: depth=0 gives [-0:], i.e. every remaining entry
    entries = entries[-depth:]
    if not entries:
        return
    width = max(len(location) for location, _funcname in entries)
    for location, funcname in entries:
        if line is None:
            yield (width, location, funcname)
        else:
            yield line % (width, location, funcname)
2946 2949
def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    # flush the other stream first so interleaved output stays readable
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    for entry in getstackframes(skip + 1, depth=depth):
        f.write(entry)
    f.flush()
2961 2964
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # reference counts per directory name
        self._dirs = {}
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style input: skip entries whose state equals 'skip'
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    self.addpath(fname)
        else:
            for fname in map:
                self.addpath(fname)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # ancestors of a known directory are already counted
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # ancestors still referenced by other entries
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs
2997 3000
# Prefer the faster C implementation when the parsers module provides one.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
3000 3003
def finddirs(path):
    """Yield each ancestor directory of a '/'-separated path, deepest first."""
    end = len(path)
    while True:
        end = path.rfind('/', 0, end)
        if end == -1:
            return
        yield path[:end]
3006 3009
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.

        Returns the list of values produced by each manager's __enter__.
        '''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        # entering twice is a programming error; drop the attribute so a
        # second call fails loudly
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.

        A manager that returns a true value suppresses the current
        exception for the managers that exit after it. An exception
        raised *by* an exit function replaces the propagated exception
        and is re-raised once every exit function has run.
        '''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure; the redundant separate
                # assignment to pending was a dead store and is removed
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
3066 3069
# compression code

# Role identifiers for wire-protocol compression priority lookups.
SERVERROLE = 'server'
CLIENTROLE = 'client'

# Describes how a compression engine participates in the wire protocol:
# its format name plus per-role advertisement priorities.
compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))
3075 3078
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        # Engine name to engine instance.
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        # Look up an engine by its registered name.
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.

        Aborts if the engine name or any identifier it declares (bundle
        name/type, wire type, revlog header) is already registered.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        # Registration succeeds only after all identifier checks pass.
        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return list(sorted(engines, key=getkey))

    def forwiretype(self, wiretype):
        # Obtain an engine by wire protocol identifier; aborts if the
        # engine is registered but cannot be loaded.
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]

# Global registry instance; engines below register themselves against it.
compengines = compressormanager()
3231 3234
class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.

    Subclasses declare their capabilities (bundles, wire protocol, revlogs)
    by overriding the corresponding methods below; capabilities left at the
    default (returning None) are treated as unsupported.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and `decompressorreader`.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()
3345 3348
class _zlibengine(compressionengine):
    # zlib/gzip support: the default engine for bundles ('GZ'), the wire
    # protocol, and revlogs (header byte 'x').
    def name(self):
        return 'zlib'

    def bundletype(self):
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        # level -1 is zlib's default compression level
        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            # inputs this small are presumed not worth compressing
            # (zlib framing overhead dominates)
            if insize < 44:
                return None

            elif insize <= 1000000:
                compressed = zlib.compress(data)
                # only return the compressed form when it actually shrank
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                z = zlib.compressobj()
                parts = []
                pos = 0
                while pos < insize:
                    # feed the input in 1 MiB slices
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())
3426 3429
class _bz2engine(compressionengine):
    """Compression engine backed by the stdlib bz2 module."""

    def name(self):
        return 'bz2'

    def bundletype(self):
        return 'bzip2', 'BZ'

    # A wire protocol name is declared, but with zero priority so it is
    # never advertised by default: bzip2 is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        """Yield bz2-compressed data for chunks from ``it``.

        ``opts['level']`` selects the compression level (default 9).
        """
        options = opts or {}
        compressor = bz2.BZ2Compressor(options.get('level', 9))
        for raw in it:
            compressed = compressor.compress(raw)
            # Skip empty results from internal buffering.
            if compressed:
                yield compressed

        yield compressor.flush()

    def decompressorreader(self, fh):
        """Return a chunkbuffer yielding bz2-decompressed data from ``fh``."""
        def chunks():
            dobj = bz2.BZ2Decompressor()
            for raw in filechunkiter(fh):
                yield dobj.decompress(raw)

        return chunkbuffer(chunks())

compengines.register(_bz2engine())
3458 3461
class _truncatedbz2engine(compressionengine):
    """Decompression-only engine for bz2 streams missing the 'BZ' magic."""

    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # compressstream is intentionally absent because it is hackily
    # handled elsewhere.

    def decompressorreader(self, fh):
        """Return a chunkbuffer decompressing header-stripped bz2 data."""
        def chunks():
            dobj = bz2.BZ2Decompressor()
            # The input stream doesn't have the 'BZ' header. So add it back.
            dobj.decompress('BZ')
            for raw in filechunkiter(fh):
                yield dobj.decompress(raw)

        return chunkbuffer(chunks())

compengines.register(_truncatedbz2engine())
3479 3482
class _noopengine(compressionengine):
    """Pass-through engine that performs no compression at all."""

    def name(self):
        return 'none'

    def bundletype(self):
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't
    # advertise it because, unless the network is fast, uncompressed
    # payloads can easily saturate the pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # revlogheader is intentionally absent; the uncompressed case is
    # handled specially in the revlog class.

    def compressstream(self, it, opts=None):
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            # None tells the caller to store the data uncompressed.
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())
3510 3513
class _zstdengine(compressionengine):
    """Compression engine backed by the optional bundled zstd module."""

    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer
        # importing until first access; None means unavailable.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        """Yield zstd-compressed data for chunks from ``it``."""
        options = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression; it strikes a good
        # balance between speed and ratio.
        level = options.get('level', 3)

        zstd = self._module
        compressor = zstd.ZstdCompressor(level=level).compressobj()
        for raw in it:
            compressed = compressor.compress(raw)
            if compressed:
                yield compressed

        yield compressor.flush()

    def decompressorreader(self, fh):
        """Return a chunkbuffer yielding zstd-decompressed data from ``fh``."""
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output,
            # but lets decompression pre-allocate the result buffer.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Callers are expected to filter out empty input.
            assert insize > 0

            # Tiny inputs are never worth compressing.
            if insize < 50:
                return None

            if insize <= 1000000:
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # Stream large inputs in recommended-size slices to bound
            # memory usage.
            z = self._cctx.compressobj()
            pieces = []
            offset = 0
            while offset < insize:
                end = offset + self._compinsize
                piece = z.compress(data[offset:end])
                if piece:
                    pieces.append(piece)
                offset = end
            pieces.append(z.flush())

            if sum(map(len, pieces)) < insize:
                return ''.join(pieces)
            return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than the other
                # streaming decompressors.
                dobj = self._dctx.decompressobj()
                pieces = []
                offset = 0
                while offset < insize:
                    end = offset + self._decompinsize
                    piece = dobj.decompress(data[offset:end])
                    if piece:
                        pieces.append(piece)
                    offset = end
                # Frame should be exhausted, so no finish() API.

                return ''.join(pieces)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        """Return a revlog compressor; ``opts['level']`` defaults to 3."""
        options = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=options.get('level', 3))

compengines.register(_zstdengine())
3628 3631
 3629 3632 # convenient shortcut
# 'dst' dumps a stack trace at the call site; handy while debugging.
 3630 3633 dst = debugstacktrace
@@ -1,633 +1,636
1 1 from __future__ import absolute_import
2 2
3 3 import errno
4 4 import os
5 5 import re
6 6 import socket
7 7 import stat
8 8 import subprocess
9 9 import sys
10 10 import tempfile
11 11
tempprefix = 'hg-hghave-'

# Maps feature name -> (predicate function, human-readable description).
checks = {
    "true": (lambda: True, "yak shaving"),
    "false": (lambda: False, "nail clipper"),
}

def check(name, desc):
    """Decorator registering the wrapped function as the check for ``name``."""
    def decorator(func):
        checks[name] = (func, desc)
        return func
    return decorator

def checkvers(name, desc, vers):
    """Registers a check function for each of a series of versions.

    vers can be a list or an iterator. Version ``v`` is registered under
    ``name`` plus ``v`` with the dots removed, e.g. ("hg", 1.4) becomes
    the feature name "hg14".
    """
    def decorator(func):
        def makechecker(v):
            def checker():
                return func(v)
            return checker
        for v in vers:
            v = str(v)
            checks['%s%s' % (name, v.replace('.', ''))] = (makechecker(v),
                                                           desc % v)
        return func
    return decorator

def checkfeatures(features):
    """Evaluate feature requirements and classify the outcome.

    Returns a dict with three lists:

    - 'error': the check itself raised an exception
    - 'missing': no check is registered under that name
    - 'skipped': the requirement (or its 'no-' negation) is unmet
    """
    result = {
        'error': [],
        'missing': [],
        'skipped': [],
    }

    for feature in features:
        # A 'no-' prefix inverts the requirement.
        negate = feature.startswith('no-')
        if negate:
            feature = feature[3:]

        if feature not in checks:
            result['missing'].append(feature)
            continue

        checkfn, desc = checks[feature]
        try:
            available = checkfn()
        except Exception:
            result['error'].append('hghave check failed: %s' % feature)
            continue

        if not negate and not available:
            result['skipped'].append('missing feature: %s' % desc)
        elif negate and available:
            result['skipped'].append('system supports %s' % desc)

    return result

def require(features):
    """Require that features are available, exiting if not.

    Exit code 2 means an unknown feature name; 1 means a known feature
    is unsatisfied or its check errored.
    """
    result = checkfeatures(features)

    for missing in result['missing']:
        sys.stderr.write('skipped: unknown feature: %s\n' % missing)
    for msg in result['skipped']:
        sys.stderr.write('skipped: %s\n' % msg)
    for msg in result['error']:
        sys.stderr.write('%s\n' % msg)

    if result['missing']:
        sys.exit(2)

    if result['skipped'] or result['error']:
        sys.exit(1)
88 88
def matchoutput(cmd, regexp, ignorestatus=False):
    """Return the match object if cmd executes successfully and its output
    is matched by the supplied regular expression.

    Returns a falsy value (None) when the command does not exist, exits
    non-zero (unless ignorestatus is set), or its output does not match.
    """
    r = re.compile(regexp)
    try:
        p = subprocess.Popen(
            cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        # Command not found: report failure. The previous code set
        # ret = -1 and then fell through to p.wait() with 'p' unbound,
        # raising NameError instead of returning a falsy result.
        return None
    ret = p.wait()
    s = p.stdout.read()
    return (ignorestatus or not ret) and r.search(s)
104 104
105 105 @check("baz", "GNU Arch baz client")
106 106 def has_baz():
107 107 return matchoutput('baz --version 2>&1', br'baz Bazaar version')
108 108
109 109 @check("bzr", "Canonical's Bazaar client")
110 110 def has_bzr():
111 111 try:
112 112 import bzrlib
113 113 import bzrlib.bzrdir
114 114 import bzrlib.errors
115 115 import bzrlib.revision
116 116 import bzrlib.revisionspec
117 117 bzrlib.revisionspec.RevisionSpec
118 118 return bzrlib.__doc__ is not None
119 119 except (AttributeError, ImportError):
120 120 return False
121 121
122 122 @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,))
123 123 def has_bzr_range(v):
124 124 major, minor = v.split('.')[0:2]
125 125 try:
126 126 import bzrlib
127 127 return (bzrlib.__doc__ is not None
128 128 and bzrlib.version_info[:2] >= (int(major), int(minor)))
129 129 except ImportError:
130 130 return False
131 131
132 132 @check("chg", "running with chg")
133 133 def has_chg():
134 134 return 'CHGHG' in os.environ
135 135
136 136 @check("cvs", "cvs client/server")
137 137 def has_cvs():
138 138 re = br'Concurrent Versions System.*?server'
139 139 return matchoutput('cvs --version 2>&1', re) and not has_msys()
140 140
141 141 @check("cvs112", "cvs client/server 1.12.* (not cvsnt)")
142 142 def has_cvs112():
143 143 re = br'Concurrent Versions System \(CVS\) 1.12.*?server'
144 144 return matchoutput('cvs --version 2>&1', re) and not has_msys()
145 145
146 146 @check("cvsnt", "cvsnt client/server")
147 147 def has_cvsnt():
148 148 re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)'
149 149 return matchoutput('cvsnt --version 2>&1', re)
150 150
151 151 @check("darcs", "darcs client")
152 152 def has_darcs():
153 153 return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True)
154 154
155 155 @check("mtn", "monotone client (>= 1.0)")
156 156 def has_mtn():
157 157 return matchoutput('mtn --version', br'monotone', True) and not matchoutput(
158 158 'mtn --version', br'monotone 0\.', True)
159 159
@check("eol-in-paths", "end-of-lines in paths")
def has_eol_in_paths():
    """Whether the filesystem accepts CR/LF characters in file names."""
    try:
        fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r')
        os.close(fd)
        os.remove(path)
        return True
    except (IOError, OSError):
        return False

@check("execbit", "executable bit")
def has_executablebit():
    """Whether the filesystem honors the executable permission bit."""
    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
        try:
            os.close(fh)
            mode = os.stat(fn).st_mode & 0o777
            new_file_has_exec = mode & EXECFLAGS
            os.chmod(fn, mode ^ EXECFLAGS)
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0o777) == mode)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)

@check("icasefs", "case insensitive file system")
def has_icasefs():
    """Whether names differing only in case refer to the same file."""
    # Stolen from mercurial.util
    fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
    os.close(fd)
    try:
        s1 = os.stat(path)
        d, b = os.path.split(path)
        p2 = os.path.join(d, b.upper())
        if path == p2:
            p2 = os.path.join(d, b.lower())
        try:
            s2 = os.stat(p2)
            return s2 == s1
        except OSError:
            return False
    finally:
        os.remove(path)

@check("fifo", "named pipes")
def has_fifo():
    """Whether named pipes (FIFOs) can be created on this filesystem."""
    if getattr(os, "mkfifo", None) is None:
        return False
    name = tempfile.mktemp(dir='.', prefix=tempprefix)
    try:
        os.mkfifo(name)
        os.unlink(name)
        return True
    except OSError:
        return False

@check("killdaemons", 'killdaemons.py support')
def has_killdaemons():
    return True
222 222
223 223 @check("cacheable", "cacheable filesystem")
224 224 def has_cacheable_fs():
225 225 from mercurial import util
226 226
227 227 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
228 228 os.close(fd)
229 229 try:
230 230 return util.cachestat(path).cacheable()
231 231 finally:
232 232 os.remove(path)
233 233
234 234 @check("lsprof", "python lsprof module")
235 235 def has_lsprof():
236 236 try:
237 237 import _lsprof
238 238 _lsprof.Profiler # silence unused import warning
239 239 return True
240 240 except ImportError:
241 241 return False
242 242
def gethgversion():
    """Return the installed hg version as an (int major, int minor) tuple.

    (0, 0) is returned when hg is unavailable or its version string is
    unparseable.
    """
    match = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)')
    if not match:
        return (0, 0)
    return (int(match.group(1)), int(match.group(2)))

@checkvers("hg", "Mercurial >= %s",
            list([(1.0 * x) / 10 for x in range(9, 40)]))
def has_hg_range(v):
    """Whether the installed hg is at least version ``v`` ('major.minor')."""
    major, minor = v.split('.')[0:2]
    return gethgversion() >= (int(major), int(minor))
254 254
@check("hg08", "Mercurial >= 0.8")
def has_hg08():
    if checks["hg09"][0]():
        return True
    # Command output is bytes, so the pattern must be bytes as well
    # (consistent with the b'' literals used by the other checks);
    # a str pattern raises TypeError under Python 3.
    return matchoutput('hg help annotate 2>&1', b'--date')

@check("hg07", "Mercurial >= 0.7")
def has_hg07():
    if checks["hg08"][0]():
        return True
    return matchoutput('hg --version --quiet 2>&1',
                       b'Mercurial Distributed SCM')

@check("hg06", "Mercurial >= 0.6")
def has_hg06():
    if checks["hg07"][0]():
        return True
    return matchoutput('hg --version --quiet 2>&1', b'Mercurial version')
272 272
273 273 @check("gettext", "GNU Gettext (msgfmt)")
274 274 def has_gettext():
275 275 return matchoutput('msgfmt --version', br'GNU gettext-tools')
276 276
277 277 @check("git", "git command line client")
278 278 def has_git():
279 279 return matchoutput('git --version 2>&1', br'^git version')
280 280
281 281 @check("docutils", "Docutils text processing library")
282 282 def has_docutils():
283 283 try:
284 284 import docutils.core
285 285 docutils.core.publish_cmdline # silence unused import
286 286 return True
287 287 except ImportError:
288 288 return False
289 289
def getsvnversion():
    """Return the svn client version as (int major, int minor).

    (0, 0) is returned when svn is unavailable or unparseable.
    """
    match = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)')
    if not match:
        return (0, 0)
    return (int(match.group(1)), int(match.group(2)))

@checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5))
def has_svn_range(v):
    """Whether the installed svn is at least version ``v`` ('major.minor')."""
    major, minor = v.split('.')[0:2]
    return getsvnversion() >= (int(major), int(minor))

@check("svn", "subversion client and admin tools")
def has_svn():
    """Whether both the svn client and svnadmin are available."""
    return matchoutput('svn --version 2>&1', br'^svn, version') and \
        matchoutput('svnadmin --version 2>&1', br'^svnadmin, version')
305 305
306 306 @check("svn-bindings", "subversion python bindings")
307 307 def has_svn_bindings():
308 308 try:
309 309 import svn.core
310 310 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
311 311 if version < (1, 4):
312 312 return False
313 313 return True
314 314 except ImportError:
315 315 return False
316 316
317 317 @check("p4", "Perforce server and client")
318 318 def has_p4():
319 319 return (matchoutput('p4 -V', br'Rev\. P4/') and
320 320 matchoutput('p4d -V', br'Rev\. P4D/'))
321 321
322 322 @check("symlink", "symbolic links")
323 323 def has_symlink():
324 324 if getattr(os, "symlink", None) is None:
325 325 return False
326 326 name = tempfile.mktemp(dir='.', prefix=tempprefix)
327 327 try:
328 328 os.symlink(".", name)
329 329 os.unlink(name)
330 330 return True
331 331 except (OSError, AttributeError):
332 332 return False
333 333
334 334 @check("hardlink", "hardlinks")
335 335 def has_hardlink():
336 336 from mercurial import util
337 337 fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
338 338 os.close(fh)
339 339 name = tempfile.mktemp(dir='.', prefix=tempprefix)
340 340 try:
341 341 util.oslink(fn, name)
342 342 os.unlink(name)
343 343 return True
344 344 except OSError:
345 345 return False
346 346 finally:
347 347 os.unlink(fn)
348 348
@check("hardlink-whitelisted", "hardlinks on whitelisted filesystems")
def has_hardlink_whitelisted():
    """Whether '.' is on a filesystem whitelisted for hardlink use."""
    from mercurial import util
    # getfstype() raises OSError when statfs fails (per the changeset
    # "statfs: make getfstype() raise OSError"); treat that as
    # unsupported. The stray duplicate pre-change call line from the
    # diff view is dropped.
    try:
        fstype = util.getfstype('.')
    except OSError:
        return False
    return fstype in util._hardlinkfswhitelist
354 357
@check("rmcwd", "can remove current working directory")
def has_rmcwd():
    """Whether the OS allows removing the process's working directory."""
    origcwd = os.getcwd()
    scratch = tempfile.mkdtemp(dir='.', prefix=tempprefix)
    try:
        os.chdir(scratch)
        # On Linux, 'rmdir .' isn't allowed, but the other names are okay.
        # On Solaris and Windows, the cwd can't be removed by any names.
        os.rmdir(os.getcwd())
        return True
    except OSError:
        return False
    finally:
        os.chdir(origcwd)
        # clean up temp dir on platforms where cwd can't be removed
        try:
            os.rmdir(scratch)
        except OSError:
            pass
374 377
375 378 @check("tla", "GNU Arch tla client")
376 379 def has_tla():
377 380 return matchoutput('tla --version 2>&1', br'The GNU Arch Revision')
378 381
379 382 @check("gpg", "gpg client")
380 383 def has_gpg():
381 384 return matchoutput('gpg --version 2>&1', br'GnuPG')
382 385
383 386 @check("gpg2", "gpg client v2")
384 387 def has_gpg2():
385 388 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.')
386 389
387 390 @check("gpg21", "gpg client v2.1+")
388 391 def has_gpg21():
389 392 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.(?!0)')
390 393
391 394 @check("unix-permissions", "unix-style permissions")
392 395 def has_unix_permissions():
393 396 d = tempfile.mkdtemp(dir='.', prefix=tempprefix)
394 397 try:
395 398 fname = os.path.join(d, 'foo')
396 399 for umask in (0o77, 0o07, 0o22):
397 400 os.umask(umask)
398 401 f = open(fname, 'w')
399 402 f.close()
400 403 mode = os.stat(fname).st_mode
401 404 os.unlink(fname)
402 405 if mode & 0o777 != ~umask & 0o666:
403 406 return False
404 407 return True
405 408 finally:
406 409 os.rmdir(d)
407 410
408 411 @check("unix-socket", "AF_UNIX socket family")
409 412 def has_unix_socket():
410 413 return getattr(socket, 'AF_UNIX', None) is not None
411 414
412 415 @check("root", "root permissions")
413 416 def has_root():
414 417 return getattr(os, 'geteuid', None) and os.geteuid() == 0
415 418
416 419 @check("pyflakes", "Pyflakes python linter")
417 420 def has_pyflakes():
418 421 return matchoutput("sh -c \"echo 'import re' 2>&1 | pyflakes\"",
419 422 br"<stdin>:1: 're' imported but unused",
420 423 True)
421 424
422 425 @check("pylint", "Pylint python linter")
423 426 def has_pylint():
424 427 return matchoutput("pylint --help",
425 428 br"Usage: pylint",
426 429 True)
427 430
428 431 @check("pygments", "Pygments source highlighting library")
429 432 def has_pygments():
430 433 try:
431 434 import pygments
432 435 pygments.highlight # silence unused import warning
433 436 return True
434 437 except ImportError:
435 438 return False
436 439
437 440 @check("outer-repo", "outer repo")
438 441 def has_outer_repo():
439 442 # failing for other reasons than 'no repo' imply that there is a repo
440 443 return not matchoutput('hg root 2>&1',
441 444 br'abort: no repository found', True)
442 445
443 446 @check("ssl", "ssl module available")
444 447 def has_ssl():
445 448 try:
446 449 import ssl
447 450 ssl.CERT_NONE
448 451 return True
449 452 except ImportError:
450 453 return False
451 454
452 455 @check("sslcontext", "python >= 2.7.9 ssl")
453 456 def has_sslcontext():
454 457 try:
455 458 import ssl
456 459 ssl.SSLContext
457 460 return True
458 461 except (ImportError, AttributeError):
459 462 return False
460 463
461 464 @check("defaultcacerts", "can verify SSL certs by system's CA certs store")
462 465 def has_defaultcacerts():
463 466 from mercurial import sslutil, ui as uimod
464 467 ui = uimod.ui.load()
465 468 return sslutil._defaultcacerts(ui) or sslutil._canloaddefaultcerts
466 469
467 470 @check("defaultcacertsloaded", "detected presence of loaded system CA certs")
468 471 def has_defaultcacertsloaded():
469 472 import ssl
470 473 from mercurial import sslutil, ui as uimod
471 474
472 475 if not has_defaultcacerts():
473 476 return False
474 477 if not has_sslcontext():
475 478 return False
476 479
477 480 ui = uimod.ui.load()
478 481 cafile = sslutil._defaultcacerts(ui)
479 482 ctx = ssl.create_default_context()
480 483 if cafile:
481 484 ctx.load_verify_locations(cafile=cafile)
482 485 else:
483 486 ctx.load_default_certs()
484 487
485 488 return len(ctx.get_ca_certs()) > 0
486 489
487 490 @check("tls1.2", "TLS 1.2 protocol support")
488 491 def has_tls1_2():
489 492 from mercurial import sslutil
490 493 return 'tls1.2' in sslutil.supportedprotocols
491 494
492 495 @check("windows", "Windows")
493 496 def has_windows():
494 497 return os.name == 'nt'
495 498
496 499 @check("system-sh", "system() uses sh")
497 500 def has_system_sh():
498 501 return os.name != 'nt'
499 502
500 503 @check("serve", "platform and python can manage 'hg serve -d'")
501 504 def has_serve():
502 505 return os.name != 'nt' # gross approximation
503 506
504 507 @check("test-repo", "running tests from repository")
505 508 def has_test_repo():
506 509 t = os.environ["TESTDIR"]
507 510 return os.path.isdir(os.path.join(t, "..", ".hg"))
508 511
509 512 @check("tic", "terminfo compiler and curses module")
510 513 def has_tic():
511 514 try:
512 515 import curses
513 516 curses.COLOR_BLUE
514 517 return matchoutput('test -x "`which tic`"', br'')
515 518 except ImportError:
516 519 return False
517 520
518 521 @check("msys", "Windows with MSYS")
519 522 def has_msys():
520 523 return os.getenv('MSYSTEM')
521 524
522 525 @check("aix", "AIX")
523 526 def has_aix():
524 527 return sys.platform.startswith("aix")
525 528
526 529 @check("osx", "OS X")
527 530 def has_osx():
528 531 return sys.platform == 'darwin'
529 532
530 533 @check("osxpackaging", "OS X packaging tools")
531 534 def has_osxpackaging():
532 535 try:
533 536 return (matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1)
534 537 and matchoutput(
535 538 'productbuild', br'Usage: productbuild ',
536 539 ignorestatus=1)
537 540 and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1)
538 541 and matchoutput(
539 542 'xar --help', br'Usage: xar', ignorestatus=1))
540 543 except ImportError:
541 544 return False
542 545
543 546 @check("docker", "docker support")
544 547 def has_docker():
545 548 pat = br'A self-sufficient runtime for'
546 549 if matchoutput('docker --help', pat):
547 550 if 'linux' not in sys.platform:
548 551 # TODO: in theory we should be able to test docker-based
549 552 # package creation on non-linux using boot2docker, but in
550 553 # practice that requires extra coordination to make sure
551 554 # $TESTTEMP is going to be visible at the same path to the
552 555 # boot2docker VM. If we figure out how to verify that, we
553 556 # can use the following instead of just saying False:
554 557 # return 'DOCKER_HOST' in os.environ
555 558 return False
556 559
557 560 return True
558 561 return False
559 562
560 563 @check("debhelper", "debian packaging tools")
561 564 def has_debhelper():
562 565 dpkg = matchoutput('dpkg --version',
563 566 br"Debian `dpkg' package management program")
564 567 dh = matchoutput('dh --help',
565 568 br'dh is a part of debhelper.', ignorestatus=True)
566 569 dh_py2 = matchoutput('dh_python2 --help',
567 570 br'other supported Python versions')
568 571 return dpkg and dh and dh_py2
569 572
570 573 @check("demandimport", "demandimport enabled")
571 574 def has_demandimport():
572 575 return os.environ.get('HGDEMANDIMPORT') != 'disable'
573 576
574 577 @check("absimport", "absolute_import in __future__")
575 578 def has_absimport():
576 579 import __future__
577 580 from mercurial import util
578 581 return util.safehasattr(__future__, "absolute_import")
579 582
580 583 @check("py27+", "running with Python 2.7+")
581 584 def has_python27ornewer():
582 585 return sys.version_info[0:2] >= (2, 7)
583 586
584 587 @check("py3k", "running with Python 3.x")
585 588 def has_py3k():
586 589 return 3 == sys.version_info[0]
587 590
588 591 @check("py3exe", "a Python 3.x interpreter is available")
589 592 def has_python3exe():
590 593 return 'PYTHON3' in os.environ
591 594
592 595 @check("py3pygments", "Pygments available on Python 3.x")
593 596 def has_py3pygments():
594 597 if has_py3k():
595 598 return has_pygments()
596 599 elif has_python3exe():
597 600 # just check exit status (ignoring output)
598 601 py3 = os.environ['PYTHON3']
599 602 return matchoutput('%s -c "import pygments"' % py3, br'')
600 603 return False
601 604
602 605 @check("pure", "running with pure Python code")
603 606 def has_pure():
604 607 return any([
605 608 os.environ.get("HGMODULEPOLICY") == "py",
606 609 os.environ.get("HGTEST_RUN_TESTS_PURE") == "--pure",
607 610 ])
608 611
609 612 @check("slow", "allow slow tests")
610 613 def has_slow():
611 614 return os.environ.get('HGTEST_SLOW') == 'slow'
612 615
613 616 @check("hypothesis", "Hypothesis automated test generation")
614 617 def has_hypothesis():
615 618 try:
616 619 import hypothesis
617 620 hypothesis.given
618 621 return True
619 622 except ImportError:
620 623 return False
621 624
622 625 @check("unziplinks", "unzip(1) understands and extracts symlinks")
623 626 def unzip_understands_symlinks():
624 627 return matchoutput('unzip --help', br'Info-ZIP')
625 628
626 629 @check("zstd", "zstd Python module available")
627 630 def has_zstd():
628 631 try:
629 632 import mercurial.zstd
630 633 mercurial.zstd.__version__
631 634 return True
632 635 except ImportError:
633 636 return False
General Comments 0
You need to be logged in to leave comments. Login now