##// END OF EJS Templates
statfs: make getfstype() raise OSError...
Yuya Nishihara -
r31678:1ed57a7d default
parent child Browse files
Show More
@@ -1,1328 +1,1328
1 /*
1 /*
2 osutil.c - native operating system services
2 osutil.c - native operating system services
3
3
4 Copyright 2007 Matt Mackall and others
4 Copyright 2007 Matt Mackall and others
5
5
6 This software may be used and distributed according to the terms of
6 This software may be used and distributed according to the terms of
7 the GNU General Public License, incorporated herein by reference.
7 the GNU General Public License, incorporated herein by reference.
8 */
8 */
9
9
10 #define _ATFILE_SOURCE
10 #define _ATFILE_SOURCE
11 #include <Python.h>
11 #include <Python.h>
12 #include <fcntl.h>
12 #include <fcntl.h>
13 #include <stdio.h>
13 #include <stdio.h>
14 #include <stdlib.h>
14 #include <stdlib.h>
15 #include <string.h>
15 #include <string.h>
16 #include <errno.h>
16 #include <errno.h>
17
17
18 #ifdef _WIN32
18 #ifdef _WIN32
19 #include <windows.h>
19 #include <windows.h>
20 #include <io.h>
20 #include <io.h>
21 #else
21 #else
22 #include <dirent.h>
22 #include <dirent.h>
23 #include <sys/socket.h>
23 #include <sys/socket.h>
24 #include <sys/stat.h>
24 #include <sys/stat.h>
25 #include <sys/types.h>
25 #include <sys/types.h>
26 #include <unistd.h>
26 #include <unistd.h>
27 #ifdef HAVE_LINUX_STATFS
27 #ifdef HAVE_LINUX_STATFS
28 #include <linux/magic.h>
28 #include <linux/magic.h>
29 #include <sys/vfs.h>
29 #include <sys/vfs.h>
30 #endif
30 #endif
31 #ifdef HAVE_BSD_STATFS
31 #ifdef HAVE_BSD_STATFS
32 #include <sys/mount.h>
32 #include <sys/mount.h>
33 #include <sys/param.h>
33 #include <sys/param.h>
34 #endif
34 #endif
35 #endif
35 #endif
36
36
37 #ifdef __APPLE__
37 #ifdef __APPLE__
38 #include <sys/attr.h>
38 #include <sys/attr.h>
39 #include <sys/vnode.h>
39 #include <sys/vnode.h>
40 #endif
40 #endif
41
41
42 #include "util.h"
42 #include "util.h"
43
43
44 /* some platforms lack the PATH_MAX definition (eg. GNU/Hurd) */
44 /* some platforms lack the PATH_MAX definition (eg. GNU/Hurd) */
45 #ifndef PATH_MAX
45 #ifndef PATH_MAX
46 #define PATH_MAX 4096
46 #define PATH_MAX 4096
47 #endif
47 #endif
48
48
#ifdef _WIN32
/*
 stat struct compatible with hg expectations
 Mercurial only uses st_mode, st_size and st_mtime
 the rest is kept to minimize changes between implementations
*/
struct hg_stat {
	int st_dev;
	int st_mode;
	int st_nlink;
	__int64 st_size; /* 64-bit so files over 2GB report correctly */
	int st_mtime;
	int st_ctime;
};
/* Python object wrapping an hg_stat; returned from listdir() on Win32 */
struct listdir_stat {
	PyObject_HEAD
	struct hg_stat st;
};
#else
/* On POSIX the platform's native struct stat is embedded directly */
struct listdir_stat {
	PyObject_HEAD
	struct stat st;
};
#endif
73
73
#ifdef IS_PY3K
/* Generate a getter returning the named stat field as a Python int.
   The ## paste builds the function name before macro expansion, so
   fields like st_mtime (a macro on some libcs) still work. */
#define listdir_slot(name) \
	static PyObject *listdir_stat_##name(PyObject *self, void *x) \
	{ \
		return PyLong_FromLong(((struct listdir_stat *)self)->st.name); \
	}
#else
/* Python 2 variant: PyInt instead of PyLong */
#define listdir_slot(name) \
	static PyObject *listdir_stat_##name(PyObject *self, void *x) \
	{ \
		return PyInt_FromLong(((struct listdir_stat *)self)->st.name); \
	}
#endif

listdir_slot(st_dev)
listdir_slot(st_mode)
listdir_slot(st_nlink)
#ifdef _WIN32
/* st_size is __int64 on Win32, so it needs the long long conversion
   instead of the plain long used by listdir_slot() */
static PyObject *listdir_stat_st_size(PyObject *self, void *x)
{
	return PyLong_FromLongLong(
		(PY_LONG_LONG)((struct listdir_stat *)self)->st.st_size);
}
#else
listdir_slot(st_size)
#endif
listdir_slot(st_mtime)
listdir_slot(st_ctime)
102
102
/* attribute table exposing the stat fields on osutil.stat objects */
static struct PyGetSetDef listdir_stat_getsets[] = {
	{"st_dev", listdir_stat_st_dev, 0, 0, 0},
	{"st_mode", listdir_stat_st_mode, 0, 0, 0},
	{"st_nlink", listdir_stat_st_nlink, 0, 0, 0},
	{"st_size", listdir_stat_st_size, 0, 0, 0},
	{"st_mtime", listdir_stat_st_mtime, 0, 0, 0},
	{"st_ctime", listdir_stat_st_ctime, 0, 0, 0},
	{0, 0, 0, 0, 0} /* sentinel */
};
112
112
/* tp_new: allocate an empty stat object; the st fields are filled in
   later by the caller (make_item()/makestat()) */
static PyObject *listdir_stat_new(PyTypeObject *t, PyObject *a, PyObject *k)
{
	return t->tp_alloc(t, 0);
}
117
117
/* tp_dealloc: no owned references, just release the object memory */
static void listdir_stat_dealloc(PyObject *o)
{
	o->ob_type->tp_free(o);
}
122
122
/* type object for osutil.stat; slots are positional, keep order intact */
static PyTypeObject listdir_stat_type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	"osutil.stat", /*tp_name*/
	sizeof(struct listdir_stat), /*tp_basicsize*/
	0, /*tp_itemsize*/
	(destructor)listdir_stat_dealloc, /*tp_dealloc*/
	0, /*tp_print*/
	0, /*tp_getattr*/
	0, /*tp_setattr*/
	0, /*tp_compare*/
	0, /*tp_repr*/
	0, /*tp_as_number*/
	0, /*tp_as_sequence*/
	0, /*tp_as_mapping*/
	0, /*tp_hash */
	0, /*tp_call*/
	0, /*tp_str*/
	0, /*tp_getattro*/
	0, /*tp_setattro*/
	0, /*tp_as_buffer*/
	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
	"stat objects", /* tp_doc */
	0, /* tp_traverse */
	0, /* tp_clear */
	0, /* tp_richcompare */
	0, /* tp_weaklistoffset */
	0, /* tp_iter */
	0, /* tp_iternext */
	0, /* tp_methods */
	0, /* tp_members */
	listdir_stat_getsets, /* tp_getset */
	0, /* tp_base */
	0, /* tp_dict */
	0, /* tp_descr_get */
	0, /* tp_descr_set */
	0, /* tp_dictoffset */
	0, /* tp_init */
	0, /* tp_alloc */
	listdir_stat_new, /* tp_new */
};
163
163
164 #ifdef _WIN32
164 #ifdef _WIN32
165
165
/* Convert a Win32 FILETIME (100ns ticks since 1601-01-01) to Unix
   epoch seconds, truncated to int to match hg_stat's int st_mtime. */
static int to_python_time(const FILETIME *tm)
{
	/* number of seconds between epoch and January 1 1601 */
	const __int64 a0 = (__int64)134774L * (__int64)24L * (__int64)3600L;
	/* conversion factor from 100ns to 1s */
	const __int64 a1 = 10000000;
	/* explicit (int) cast to suspend compiler warnings */
	return (int)((((__int64)tm->dwHighDateTime << 32)
			+ tm->dwLowDateTime) / a1 - a0);
}
176
176
/*
 * Build one listdir() result entry from Win32 find data.
 * Returns ("name", kind) when wantstat is false, otherwise
 * ("name", kind, stat-object); NULL with an exception set on failure.
 */
static PyObject *make_item(const WIN32_FIND_DATAA *fd, int wantstat)
{
	PyObject *py_st;
	struct hg_stat *stp;

	int kind = (fd->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
		? _S_IFDIR : _S_IFREG;

	if (!wantstat)
		return Py_BuildValue("si", fd->cFileName, kind);

	py_st = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
	if (!py_st)
		return NULL;

	stp = &((struct listdir_stat *)py_st)->st;
	/*
	   use kind as st_mode
	   rwx bits on Win32 are meaningless
	   and Hg does not use them anyway
	 */
	stp->st_mode = kind;
	stp->st_mtime = to_python_time(&fd->ftLastWriteTime);
	stp->st_ctime = to_python_time(&fd->ftCreationTime);
	if (kind == _S_IFREG)
		/* assemble 64-bit size from the split high/low DWORDs */
		stp->st_size = ((__int64)fd->nFileSizeHigh << 32)
			+ fd->nFileSizeLow;
	/* "N" steals the reference to py_st */
	return Py_BuildValue("siN", fd->cFileName,
			     kind, py_st);
}
207
207
/*
 * Win32 listdir: enumerate `path` via FindFirstFile/FindNextFile.
 * Returns a list of (name, kind) or (name, kind, stat) tuples,
 * an empty list if the `skip` directory entry is found (quit-early
 * signal), or NULL with an exception set on error.
 * Cleanup uses a cascading goto chain; each label undoes one
 * acquisition, in reverse order.
 */
static PyObject *_listdir(char *path, int plen, int wantstat, char *skip)
{
	PyObject *rval = NULL; /* initialize - return value */
	PyObject *list;
	HANDLE fh;
	WIN32_FIND_DATAA fd;
	char *pattern;

	/* build the path + \* pattern string */
	pattern = PyMem_Malloc(plen + 3); /* path + \* + \0 */
	if (!pattern) {
		PyErr_NoMemory();
		goto error_nomem;
	}
	memcpy(pattern, path, plen);

	if (plen > 0) {
		char c = path[plen-1];
		/* only append a separator if the path doesn't end in one
		   (or in a drive letter's colon) */
		if (c != ':' && c != '/' && c != '\\')
			pattern[plen++] = '\\';
	}
	pattern[plen++] = '*';
	pattern[plen] = '\0';

	fh = FindFirstFileA(pattern, &fd);
	if (fh == INVALID_HANDLE_VALUE) {
		PyErr_SetFromWindowsErrWithFilename(GetLastError(), path);
		goto error_file;
	}

	list = PyList_New(0);
	if (!list)
		goto error_list;

	do {
		PyObject *item;

		if (fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
			if (!strcmp(fd.cFileName, ".")
			|| !strcmp(fd.cFileName, ".."))
				continue;

			/* quit early: caller asked us to stop when this
			   directory entry is present */
			if (skip && !strcmp(fd.cFileName, skip)) {
				rval = PyList_New(0);
				goto error;
			}
		}

		item = make_item(&fd, wantstat);
		if (!item)
			goto error;

		if (PyList_Append(list, item)) {
			Py_XDECREF(item);
			goto error;
		}

		Py_XDECREF(item);
	} while (FindNextFileA(fh, &fd));

	/* FindNextFileA returning 0 is normal termination only when the
	   last error says there are no more files */
	if (GetLastError() != ERROR_NO_MORE_FILES) {
		PyErr_SetFromWindowsErrWithFilename(GetLastError(), path);
		goto error;
	}

	rval = list;
	Py_XINCREF(rval);
error:
	Py_XDECREF(list);
error_list:
	FindClose(fh);
error_file:
	PyMem_Free(pattern);
error_nomem:
	return rval;
}
284
284
285 #else
285 #else
286
286
/* Map a dirent's d_type to the matching S_IF* file-kind constant.
   Returns -1 when the type is unknown, or always on platforms whose
   dirent lacks d_type (no DT_REG); callers then fall back to stat. */
int entkind(struct dirent *ent)
{
#ifdef DT_REG
	int dt = ent->d_type;
	if (dt == DT_REG)
		return S_IFREG;
	if (dt == DT_DIR)
		return S_IFDIR;
	if (dt == DT_LNK)
		return S_IFLNK;
	if (dt == DT_BLK)
		return S_IFBLK;
	if (dt == DT_CHR)
		return S_IFCHR;
	if (dt == DT_FIFO)
		return S_IFIFO;
	if (dt == DT_SOCK)
		return S_IFSOCK;
#endif
	return -1;
}
302
302
/* Wrap a struct stat in a new osutil.stat Python object.
   Returns a new reference, or NULL with an exception set. */
static PyObject *makestat(const struct stat *st)
{
	PyObject *stat;

	stat = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
	if (stat)
		memcpy(&((struct listdir_stat *)stat)->st, st, sizeof(*st));
	return stat;
}
312
312
313 static PyObject *_listdir_stat(char *path, int pathlen, int keepstat,
313 static PyObject *_listdir_stat(char *path, int pathlen, int keepstat,
314 char *skip)
314 char *skip)
315 {
315 {
316 PyObject *list, *elem, *stat = NULL, *ret = NULL;
316 PyObject *list, *elem, *stat = NULL, *ret = NULL;
317 char fullpath[PATH_MAX + 10];
317 char fullpath[PATH_MAX + 10];
318 int kind, err;
318 int kind, err;
319 struct stat st;
319 struct stat st;
320 struct dirent *ent;
320 struct dirent *ent;
321 DIR *dir;
321 DIR *dir;
322 #ifdef AT_SYMLINK_NOFOLLOW
322 #ifdef AT_SYMLINK_NOFOLLOW
323 int dfd = -1;
323 int dfd = -1;
324 #endif
324 #endif
325
325
326 if (pathlen >= PATH_MAX) {
326 if (pathlen >= PATH_MAX) {
327 errno = ENAMETOOLONG;
327 errno = ENAMETOOLONG;
328 PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
328 PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
329 goto error_value;
329 goto error_value;
330 }
330 }
331 strncpy(fullpath, path, PATH_MAX);
331 strncpy(fullpath, path, PATH_MAX);
332 fullpath[pathlen] = '/';
332 fullpath[pathlen] = '/';
333
333
334 #ifdef AT_SYMLINK_NOFOLLOW
334 #ifdef AT_SYMLINK_NOFOLLOW
335 dfd = open(path, O_RDONLY);
335 dfd = open(path, O_RDONLY);
336 if (dfd == -1) {
336 if (dfd == -1) {
337 PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
337 PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
338 goto error_value;
338 goto error_value;
339 }
339 }
340 dir = fdopendir(dfd);
340 dir = fdopendir(dfd);
341 #else
341 #else
342 dir = opendir(path);
342 dir = opendir(path);
343 #endif
343 #endif
344 if (!dir) {
344 if (!dir) {
345 PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
345 PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
346 goto error_dir;
346 goto error_dir;
347 }
347 }
348
348
349 list = PyList_New(0);
349 list = PyList_New(0);
350 if (!list)
350 if (!list)
351 goto error_list;
351 goto error_list;
352
352
353 while ((ent = readdir(dir))) {
353 while ((ent = readdir(dir))) {
354 if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
354 if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
355 continue;
355 continue;
356
356
357 kind = entkind(ent);
357 kind = entkind(ent);
358 if (kind == -1 || keepstat) {
358 if (kind == -1 || keepstat) {
359 #ifdef AT_SYMLINK_NOFOLLOW
359 #ifdef AT_SYMLINK_NOFOLLOW
360 err = fstatat(dfd, ent->d_name, &st,
360 err = fstatat(dfd, ent->d_name, &st,
361 AT_SYMLINK_NOFOLLOW);
361 AT_SYMLINK_NOFOLLOW);
362 #else
362 #else
363 strncpy(fullpath + pathlen + 1, ent->d_name,
363 strncpy(fullpath + pathlen + 1, ent->d_name,
364 PATH_MAX - pathlen);
364 PATH_MAX - pathlen);
365 fullpath[PATH_MAX] = '\0';
365 fullpath[PATH_MAX] = '\0';
366 err = lstat(fullpath, &st);
366 err = lstat(fullpath, &st);
367 #endif
367 #endif
368 if (err == -1) {
368 if (err == -1) {
369 /* race with file deletion? */
369 /* race with file deletion? */
370 if (errno == ENOENT)
370 if (errno == ENOENT)
371 continue;
371 continue;
372 strncpy(fullpath + pathlen + 1, ent->d_name,
372 strncpy(fullpath + pathlen + 1, ent->d_name,
373 PATH_MAX - pathlen);
373 PATH_MAX - pathlen);
374 fullpath[PATH_MAX] = 0;
374 fullpath[PATH_MAX] = 0;
375 PyErr_SetFromErrnoWithFilename(PyExc_OSError,
375 PyErr_SetFromErrnoWithFilename(PyExc_OSError,
376 fullpath);
376 fullpath);
377 goto error;
377 goto error;
378 }
378 }
379 kind = st.st_mode & S_IFMT;
379 kind = st.st_mode & S_IFMT;
380 }
380 }
381
381
382 /* quit early? */
382 /* quit early? */
383 if (skip && kind == S_IFDIR && !strcmp(ent->d_name, skip)) {
383 if (skip && kind == S_IFDIR && !strcmp(ent->d_name, skip)) {
384 ret = PyList_New(0);
384 ret = PyList_New(0);
385 goto error;
385 goto error;
386 }
386 }
387
387
388 if (keepstat) {
388 if (keepstat) {
389 stat = makestat(&st);
389 stat = makestat(&st);
390 if (!stat)
390 if (!stat)
391 goto error;
391 goto error;
392 elem = Py_BuildValue("siN", ent->d_name, kind, stat);
392 elem = Py_BuildValue("siN", ent->d_name, kind, stat);
393 } else
393 } else
394 elem = Py_BuildValue("si", ent->d_name, kind);
394 elem = Py_BuildValue("si", ent->d_name, kind);
395 if (!elem)
395 if (!elem)
396 goto error;
396 goto error;
397 stat = NULL;
397 stat = NULL;
398
398
399 PyList_Append(list, elem);
399 PyList_Append(list, elem);
400 Py_DECREF(elem);
400 Py_DECREF(elem);
401 }
401 }
402
402
403 ret = list;
403 ret = list;
404 Py_INCREF(ret);
404 Py_INCREF(ret);
405
405
406 error:
406 error:
407 Py_DECREF(list);
407 Py_DECREF(list);
408 Py_XDECREF(stat);
408 Py_XDECREF(stat);
409 error_list:
409 error_list:
410 closedir(dir);
410 closedir(dir);
411 /* closedir also closes its dirfd */
411 /* closedir also closes its dirfd */
412 goto error_value;
412 goto error_value;
413 error_dir:
413 error_dir:
414 #ifdef AT_SYMLINK_NOFOLLOW
414 #ifdef AT_SYMLINK_NOFOLLOW
415 close(dfd);
415 close(dfd);
416 #endif
416 #endif
417 error_value:
417 error_value:
418 return ret;
418 return ret;
419 }
419 }
420
420
421 #ifdef __APPLE__
421 #ifdef __APPLE__
422
422
/* One record returned by getdirentriesattr(); the field order and the
   packed attribute must exactly match the attrlist requested in
   _listdir_batch(), otherwise the buffer is parsed as garbage. */
typedef struct {
	u_int32_t length;          /* total record length, used to advance */
	attrreference_t name;      /* offset/length of the entry name */
	fsobj_type_t obj_type;     /* vnode type (VREG, VDIR, ...) */
	struct timespec mtime;
	/* access_mask is 16-bit; padding keeps the struct layout identical
	   to what the kernel writes on either endianness */
#if __LITTLE_ENDIAN__
	mode_t access_mask;
	uint16_t padding;
#else
	uint16_t padding;
	mode_t access_mask;
#endif
	off_t size;
} __attribute__((packed)) attrbuf_entry;
437
437
438 int attrkind(attrbuf_entry *entry)
438 int attrkind(attrbuf_entry *entry)
439 {
439 {
440 switch (entry->obj_type) {
440 switch (entry->obj_type) {
441 case VREG: return S_IFREG;
441 case VREG: return S_IFREG;
442 case VDIR: return S_IFDIR;
442 case VDIR: return S_IFDIR;
443 case VLNK: return S_IFLNK;
443 case VLNK: return S_IFLNK;
444 case VBLK: return S_IFBLK;
444 case VBLK: return S_IFBLK;
445 case VCHR: return S_IFCHR;
445 case VCHR: return S_IFCHR;
446 case VFIFO: return S_IFIFO;
446 case VFIFO: return S_IFIFO;
447 case VSOCK: return S_IFSOCK;
447 case VSOCK: return S_IFSOCK;
448 }
448 }
449 return -1;
449 return -1;
450 }
450 }
451
451
452 /* get these many entries at a time */
452 /* get these many entries at a time */
453 #define LISTDIR_BATCH_SIZE 50
453 #define LISTDIR_BATCH_SIZE 50
454
454
/*
 * OS X fast path: enumerate `path` with getdirentriesattr(), which
 * returns name, type, mtime, mode and size in batches without a
 * per-entry stat() call.
 * On success returns the same list shape as _listdir_stat(). When the
 * filesystem does not support getdirentriesattr, or the directory
 * changed mid-iteration, returns NULL with *fallback set to true and
 * no exception set, telling the caller to retry with _listdir_stat().
 */
static PyObject *_listdir_batch(char *path, int pathlen, int keepstat,
				char *skip, bool *fallback)
{
	PyObject *list, *elem, *stat = NULL, *ret = NULL;
	int kind, err;
	unsigned long index;
	unsigned int count, old_state, new_state;
	bool state_seen = false;
	attrbuf_entry *entry;
	/* from the getattrlist(2) man page: a path can be no longer than
	   (NAME_MAX * 3 + 1) bytes. Also, "The getattrlist() function will
	   silently truncate attribute data if attrBufSize is too small." So
	   pass in a buffer big enough for the worst case. */
	char attrbuf[LISTDIR_BATCH_SIZE * (sizeof(attrbuf_entry) + NAME_MAX * 3 + 1)];
	unsigned int basep_unused;

	struct stat st;
	int dfd = -1;

	/* these must match the attrbuf_entry struct, otherwise you'll end up
	   with garbage */
	struct attrlist requested_attr = {0};
	requested_attr.bitmapcount = ATTR_BIT_MAP_COUNT;
	requested_attr.commonattr = (ATTR_CMN_NAME | ATTR_CMN_OBJTYPE |
				     ATTR_CMN_MODTIME | ATTR_CMN_ACCESSMASK);
	requested_attr.fileattr = ATTR_FILE_DATALENGTH;

	*fallback = false;

	if (pathlen >= PATH_MAX) {
		errno = ENAMETOOLONG;
		PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
		goto error_value;
	}

	dfd = open(path, O_RDONLY);
	if (dfd == -1) {
		PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
		goto error_value;
	}

	list = PyList_New(0);
	if (!list)
		goto error_dir;

	do {
		count = LISTDIR_BATCH_SIZE;
		err = getdirentriesattr(dfd, &requested_attr, &attrbuf,
					sizeof(attrbuf), &count, &basep_unused,
					&new_state, 0);
		if (err < 0) {
			if (errno == ENOTSUP) {
				/* We're on a filesystem that doesn't support
				   getdirentriesattr. Fall back to the
				   stat-based implementation. */
				*fallback = true;
			} else
				PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
			goto error;
		}

		if (!state_seen) {
			old_state = new_state;
			state_seen = true;
		} else if (old_state != new_state) {
			/* There's an edge case with getdirentriesattr. Consider
			   the following initial list of files:

			   a
			   b
			   <--
			   c
			   d

			   If the iteration is paused at the arrow, and b is
			   deleted before it is resumed, getdirentriesattr will
			   not return d at all! Ordinarily we're expected to
			   restart the iteration from the beginning. To avoid
			   getting stuck in a retry loop here, fall back to
			   stat. */
			*fallback = true;
			goto error;
		}

		entry = (attrbuf_entry *)attrbuf;

		for (index = 0; index < count; index++) {
			/* the name is stored at a variable offset from the
			   attrreference field */
			char *filename = ((char *)&entry->name) +
				entry->name.attr_dataoffset;

			if (!strcmp(filename, ".") || !strcmp(filename, ".."))
				continue;

			kind = attrkind(entry);
			if (kind == -1) {
				PyErr_Format(PyExc_OSError,
					     "unknown object type %u for file "
					     "%s%s!",
					     entry->obj_type, path, filename);
				goto error;
			}

			/* quit early? */
			if (skip && kind == S_IFDIR && !strcmp(filename, skip)) {
				ret = PyList_New(0);
				goto error;
			}

			if (keepstat) {
				/* from the getattrlist(2) man page: "Only the
				   permission bits ... are valid". */
				st.st_mode = (entry->access_mask & ~S_IFMT) | kind;
				st.st_mtime = entry->mtime.tv_sec;
				st.st_size = entry->size;
				stat = makestat(&st);
				if (!stat)
					goto error;
				/* "N" steals the reference to stat */
				elem = Py_BuildValue("siN", filename, kind, stat);
			} else
				elem = Py_BuildValue("si", filename, kind);
			if (!elem)
				goto error;
			stat = NULL;

			/* NOTE(review): PyList_Append's return value is
			   ignored here (the Win32 _listdir checks it) —
			   consider handling append failure */
			PyList_Append(list, elem);
			Py_DECREF(elem);

			/* records are variable-length; advance by the
			   kernel-reported length */
			entry = (attrbuf_entry *)((char *)entry + entry->length);
		}
	} while (err == 0); /* err == 1 means EOF was reached */

	ret = list;
	Py_INCREF(ret);

error:
	Py_DECREF(list);
	Py_XDECREF(stat);
error_dir:
	close(dfd);
error_value:
	return ret;
}
597
597
598 #endif /* __APPLE__ */
598 #endif /* __APPLE__ */
599
599
/*
 * Dispatch listdir: on OS X try the fast getdirentriesattr() batch
 * implementation first, falling back to the portable stat-based one
 * when the filesystem doesn't support it; elsewhere go straight to
 * _listdir_stat().
 */
static PyObject *_listdir(char *path, int pathlen, int keepstat, char *skip)
{
#ifdef __APPLE__
	PyObject *ret;
	bool fallback = false;

	ret = _listdir_batch(path, pathlen, keepstat, skip, &fallback);
	/* only retry with the stat implementation when batch signalled
	   fallback (ret == NULL with no exception set) */
	if (ret != NULL || !fallback)
		return ret;
#endif
	return _listdir_stat(path, pathlen, keepstat, skip);
}
612
612
613 static PyObject *statfiles(PyObject *self, PyObject *args)
613 static PyObject *statfiles(PyObject *self, PyObject *args)
614 {
614 {
615 PyObject *names, *stats;
615 PyObject *names, *stats;
616 Py_ssize_t i, count;
616 Py_ssize_t i, count;
617
617
618 if (!PyArg_ParseTuple(args, "O:statfiles", &names))
618 if (!PyArg_ParseTuple(args, "O:statfiles", &names))
619 return NULL;
619 return NULL;
620
620
621 count = PySequence_Length(names);
621 count = PySequence_Length(names);
622 if (count == -1) {
622 if (count == -1) {
623 PyErr_SetString(PyExc_TypeError, "not a sequence");
623 PyErr_SetString(PyExc_TypeError, "not a sequence");
624 return NULL;
624 return NULL;
625 }
625 }
626
626
627 stats = PyList_New(count);
627 stats = PyList_New(count);
628 if (stats == NULL)
628 if (stats == NULL)
629 return NULL;
629 return NULL;
630
630
631 for (i = 0; i < count; i++) {
631 for (i = 0; i < count; i++) {
632 PyObject *stat, *pypath;
632 PyObject *stat, *pypath;
633 struct stat st;
633 struct stat st;
634 int ret, kind;
634 int ret, kind;
635 char *path;
635 char *path;
636
636
637 /* With a large file count or on a slow filesystem,
637 /* With a large file count or on a slow filesystem,
638 don't block signals for long (issue4878). */
638 don't block signals for long (issue4878). */
639 if ((i % 1000) == 999 && PyErr_CheckSignals() == -1)
639 if ((i % 1000) == 999 && PyErr_CheckSignals() == -1)
640 goto bail;
640 goto bail;
641
641
642 pypath = PySequence_GetItem(names, i);
642 pypath = PySequence_GetItem(names, i);
643 if (!pypath)
643 if (!pypath)
644 goto bail;
644 goto bail;
645 path = PyBytes_AsString(pypath);
645 path = PyBytes_AsString(pypath);
646 if (path == NULL) {
646 if (path == NULL) {
647 Py_DECREF(pypath);
647 Py_DECREF(pypath);
648 PyErr_SetString(PyExc_TypeError, "not a string");
648 PyErr_SetString(PyExc_TypeError, "not a string");
649 goto bail;
649 goto bail;
650 }
650 }
651 ret = lstat(path, &st);
651 ret = lstat(path, &st);
652 Py_DECREF(pypath);
652 Py_DECREF(pypath);
653 kind = st.st_mode & S_IFMT;
653 kind = st.st_mode & S_IFMT;
654 if (ret != -1 && (kind == S_IFREG || kind == S_IFLNK)) {
654 if (ret != -1 && (kind == S_IFREG || kind == S_IFLNK)) {
655 stat = makestat(&st);
655 stat = makestat(&st);
656 if (stat == NULL)
656 if (stat == NULL)
657 goto bail;
657 goto bail;
658 PyList_SET_ITEM(stats, i, stat);
658 PyList_SET_ITEM(stats, i, stat);
659 } else {
659 } else {
660 Py_INCREF(Py_None);
660 Py_INCREF(Py_None);
661 PyList_SET_ITEM(stats, i, Py_None);
661 PyList_SET_ITEM(stats, i, Py_None);
662 }
662 }
663 }
663 }
664
664
665 return stats;
665 return stats;
666
666
667 bail:
667 bail:
668 Py_DECREF(stats);
668 Py_DECREF(stats);
669 return NULL;
669 return NULL;
670 }
670 }
671
671
672 /*
672 /*
673 * recvfds() simply does not release GIL during blocking io operation because
673 * recvfds() simply does not release GIL during blocking io operation because
674 * command server is known to be single-threaded.
674 * command server is known to be single-threaded.
675 *
675 *
676 * Old systems such as Solaris don't provide CMSG_LEN, msg_control, etc.
676 * Old systems such as Solaris don't provide CMSG_LEN, msg_control, etc.
677 * Currently, recvfds() is not supported on these platforms.
677 * Currently, recvfds() is not supported on these platforms.
678 */
678 */
679 #ifdef CMSG_LEN
679 #ifdef CMSG_LEN
680
680
/*
 * Receive one SCM_RIGHTS ancillary message from sockfd into cbuf.
 *
 * On success, point *rfds at the descriptor array inside cbuf and
 * return the number of descriptors received (0 if the message carried
 * none). Returns -1 with errno set if recvmsg() fails. The caller owns
 * cbuf; *rfds aliases it and needs no separate free.
 */
static ssize_t recvfdstobuf(int sockfd, int **rfds, void *cbuf, size_t cbufsize)
{
	char discard[1];
	struct iovec iov = {discard, sizeof(discard)};
	struct msghdr msgh = {0};
	struct cmsghdr *cmsg;

	/* a 1-byte payload is required for the control data to arrive */
	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = cbuf;
	msgh.msg_controllen = (socklen_t)cbufsize;
	if (recvmsg(sockfd, &msgh, 0) < 0)
		return -1;

	/* scan ancillary data for the first SCM_RIGHTS message */
	cmsg = CMSG_FIRSTHDR(&msgh);
	while (cmsg) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_RIGHTS) {
			*rfds = (int *)CMSG_DATA(cmsg);
			return (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
		}
		cmsg = CMSG_NXTHDR(&msgh, cmsg);
	}

	/* no descriptors were attached */
	*rfds = cbuf;
	return 0;
}
707
707
708 static PyObject *recvfds(PyObject *self, PyObject *args)
708 static PyObject *recvfds(PyObject *self, PyObject *args)
709 {
709 {
710 int sockfd;
710 int sockfd;
711 int *rfds = NULL;
711 int *rfds = NULL;
712 ssize_t rfdscount, i;
712 ssize_t rfdscount, i;
713 char cbuf[256];
713 char cbuf[256];
714 PyObject *rfdslist = NULL;
714 PyObject *rfdslist = NULL;
715
715
716 if (!PyArg_ParseTuple(args, "i", &sockfd))
716 if (!PyArg_ParseTuple(args, "i", &sockfd))
717 return NULL;
717 return NULL;
718
718
719 rfdscount = recvfdstobuf(sockfd, &rfds, cbuf, sizeof(cbuf));
719 rfdscount = recvfdstobuf(sockfd, &rfds, cbuf, sizeof(cbuf));
720 if (rfdscount < 0)
720 if (rfdscount < 0)
721 return PyErr_SetFromErrno(PyExc_OSError);
721 return PyErr_SetFromErrno(PyExc_OSError);
722
722
723 rfdslist = PyList_New(rfdscount);
723 rfdslist = PyList_New(rfdscount);
724 if (!rfdslist)
724 if (!rfdslist)
725 goto bail;
725 goto bail;
726 for (i = 0; i < rfdscount; i++) {
726 for (i = 0; i < rfdscount; i++) {
727 PyObject *obj = PyLong_FromLong(rfds[i]);
727 PyObject *obj = PyLong_FromLong(rfds[i]);
728 if (!obj)
728 if (!obj)
729 goto bail;
729 goto bail;
730 PyList_SET_ITEM(rfdslist, i, obj);
730 PyList_SET_ITEM(rfdslist, i, obj);
731 }
731 }
732 return rfdslist;
732 return rfdslist;
733
733
734 bail:
734 bail:
735 Py_XDECREF(rfdslist);
735 Py_XDECREF(rfdslist);
736 return NULL;
736 return NULL;
737 }
737 }
738
738
739 #endif /* CMSG_LEN */
739 #endif /* CMSG_LEN */
740
740
741 #if defined(HAVE_SETPROCTITLE)
741 #if defined(HAVE_SETPROCTITLE)
742 /* setproctitle is the first choice - available in FreeBSD */
742 /* setproctitle is the first choice - available in FreeBSD */
743 #define SETPROCNAME_USE_SETPROCTITLE
743 #define SETPROCNAME_USE_SETPROCTITLE
744 #elif (defined(__linux__) || defined(__APPLE__)) && PY_MAJOR_VERSION == 2
744 #elif (defined(__linux__) || defined(__APPLE__)) && PY_MAJOR_VERSION == 2
745 /* rewrite the argv buffer in place - works in Linux and OS X. Py_GetArgcArgv
745 /* rewrite the argv buffer in place - works in Linux and OS X. Py_GetArgcArgv
746 * in Python 3 returns the copied wchar_t **argv, thus unsupported. */
746 * in Python 3 returns the copied wchar_t **argv, thus unsupported. */
747 #define SETPROCNAME_USE_ARGVREWRITE
747 #define SETPROCNAME_USE_ARGVREWRITE
748 #else
748 #else
749 #define SETPROCNAME_USE_NONE
749 #define SETPROCNAME_USE_NONE
750 #endif
750 #endif
751
751
752 #ifndef SETPROCNAME_USE_NONE
752 #ifndef SETPROCNAME_USE_NONE
753 static PyObject *setprocname(PyObject *self, PyObject *args)
753 static PyObject *setprocname(PyObject *self, PyObject *args)
754 {
754 {
755 const char *name = NULL;
755 const char *name = NULL;
756 if (!PyArg_ParseTuple(args, "s", &name))
756 if (!PyArg_ParseTuple(args, "s", &name))
757 return NULL;
757 return NULL;
758
758
759 #if defined(SETPROCNAME_USE_SETPROCTITLE)
759 #if defined(SETPROCNAME_USE_SETPROCTITLE)
760 setproctitle("%s", name);
760 setproctitle("%s", name);
761 #elif defined(SETPROCNAME_USE_ARGVREWRITE)
761 #elif defined(SETPROCNAME_USE_ARGVREWRITE)
762 {
762 {
763 static char *argvstart = NULL;
763 static char *argvstart = NULL;
764 static size_t argvsize = 0;
764 static size_t argvsize = 0;
765 if (argvstart == NULL) {
765 if (argvstart == NULL) {
766 int argc = 0, i;
766 int argc = 0, i;
767 char **argv = NULL;
767 char **argv = NULL;
768 char *argvend;
768 char *argvend;
769 extern void Py_GetArgcArgv(int *argc, char ***argv);
769 extern void Py_GetArgcArgv(int *argc, char ***argv);
770 Py_GetArgcArgv(&argc, &argv);
770 Py_GetArgcArgv(&argc, &argv);
771
771
772 /* Check the memory we can use. Typically, argv[i] and
772 /* Check the memory we can use. Typically, argv[i] and
773 * argv[i + 1] are continuous. */
773 * argv[i + 1] are continuous. */
774 argvend = argvstart = argv[0];
774 argvend = argvstart = argv[0];
775 for (i = 0; i < argc; ++i) {
775 for (i = 0; i < argc; ++i) {
776 if (argv[i] > argvend || argv[i] < argvstart)
776 if (argv[i] > argvend || argv[i] < argvstart)
777 break; /* not continuous */
777 break; /* not continuous */
778 size_t len = strlen(argv[i]);
778 size_t len = strlen(argv[i]);
779 argvend = argv[i] + len + 1 /* '\0' */;
779 argvend = argv[i] + len + 1 /* '\0' */;
780 }
780 }
781 if (argvend > argvstart) /* sanity check */
781 if (argvend > argvstart) /* sanity check */
782 argvsize = argvend - argvstart;
782 argvsize = argvend - argvstart;
783 }
783 }
784
784
785 if (argvstart && argvsize > 1) {
785 if (argvstart && argvsize > 1) {
786 int n = snprintf(argvstart, argvsize, "%s", name);
786 int n = snprintf(argvstart, argvsize, "%s", name);
787 if (n >= 0 && (size_t)n < argvsize)
787 if (n >= 0 && (size_t)n < argvsize)
788 memset(argvstart + n, 0, argvsize - n);
788 memset(argvstart + n, 0, argvsize - n);
789 }
789 }
790 }
790 }
791 #endif
791 #endif
792
792
793 Py_RETURN_NONE;
793 Py_RETURN_NONE;
794 }
794 }
795 #endif /* ndef SETPROCNAME_USE_NONE */
795 #endif /* ndef SETPROCNAME_USE_NONE */
796
796
797 #if defined(HAVE_BSD_STATFS)
797 #if defined(HAVE_BSD_STATFS)
798 static const char *describefstype(const struct statfs *pbuf)
798 static const char *describefstype(const struct statfs *pbuf)
799 {
799 {
800 /* BSD or OSX provides a f_fstypename field */
800 /* BSD or OSX provides a f_fstypename field */
801 return pbuf->f_fstypename;
801 return pbuf->f_fstypename;
802 }
802 }
803 #elif defined(HAVE_LINUX_STATFS)
803 #elif defined(HAVE_LINUX_STATFS)
804 static const char *describefstype(const struct statfs *pbuf)
804 static const char *describefstype(const struct statfs *pbuf)
805 {
805 {
806 /* Begin of Linux filesystems */
806 /* Begin of Linux filesystems */
807 #ifdef ADFS_SUPER_MAGIC
807 #ifdef ADFS_SUPER_MAGIC
808 if (pbuf->f_type == ADFS_SUPER_MAGIC)
808 if (pbuf->f_type == ADFS_SUPER_MAGIC)
809 return "adfs";
809 return "adfs";
810 #endif
810 #endif
811 #ifdef AFFS_SUPER_MAGIC
811 #ifdef AFFS_SUPER_MAGIC
812 if (pbuf->f_type == AFFS_SUPER_MAGIC)
812 if (pbuf->f_type == AFFS_SUPER_MAGIC)
813 return "affs";
813 return "affs";
814 #endif
814 #endif
815 #ifdef AUTOFS_SUPER_MAGIC
815 #ifdef AUTOFS_SUPER_MAGIC
816 if (pbuf->f_type == AUTOFS_SUPER_MAGIC)
816 if (pbuf->f_type == AUTOFS_SUPER_MAGIC)
817 return "autofs";
817 return "autofs";
818 #endif
818 #endif
819 #ifdef BDEVFS_MAGIC
819 #ifdef BDEVFS_MAGIC
820 if (pbuf->f_type == BDEVFS_MAGIC)
820 if (pbuf->f_type == BDEVFS_MAGIC)
821 return "bdevfs";
821 return "bdevfs";
822 #endif
822 #endif
823 #ifdef BEFS_SUPER_MAGIC
823 #ifdef BEFS_SUPER_MAGIC
824 if (pbuf->f_type == BEFS_SUPER_MAGIC)
824 if (pbuf->f_type == BEFS_SUPER_MAGIC)
825 return "befs";
825 return "befs";
826 #endif
826 #endif
827 #ifdef BFS_MAGIC
827 #ifdef BFS_MAGIC
828 if (pbuf->f_type == BFS_MAGIC)
828 if (pbuf->f_type == BFS_MAGIC)
829 return "bfs";
829 return "bfs";
830 #endif
830 #endif
831 #ifdef BINFMTFS_MAGIC
831 #ifdef BINFMTFS_MAGIC
832 if (pbuf->f_type == BINFMTFS_MAGIC)
832 if (pbuf->f_type == BINFMTFS_MAGIC)
833 return "binfmtfs";
833 return "binfmtfs";
834 #endif
834 #endif
835 #ifdef BTRFS_SUPER_MAGIC
835 #ifdef BTRFS_SUPER_MAGIC
836 if (pbuf->f_type == BTRFS_SUPER_MAGIC)
836 if (pbuf->f_type == BTRFS_SUPER_MAGIC)
837 return "btrfs";
837 return "btrfs";
838 #endif
838 #endif
839 #ifdef CGROUP_SUPER_MAGIC
839 #ifdef CGROUP_SUPER_MAGIC
840 if (pbuf->f_type == CGROUP_SUPER_MAGIC)
840 if (pbuf->f_type == CGROUP_SUPER_MAGIC)
841 return "cgroup";
841 return "cgroup";
842 #endif
842 #endif
843 #ifdef CIFS_MAGIC_NUMBER
843 #ifdef CIFS_MAGIC_NUMBER
844 if (pbuf->f_type == CIFS_MAGIC_NUMBER)
844 if (pbuf->f_type == CIFS_MAGIC_NUMBER)
845 return "cifs";
845 return "cifs";
846 #endif
846 #endif
847 #ifdef CODA_SUPER_MAGIC
847 #ifdef CODA_SUPER_MAGIC
848 if (pbuf->f_type == CODA_SUPER_MAGIC)
848 if (pbuf->f_type == CODA_SUPER_MAGIC)
849 return "coda";
849 return "coda";
850 #endif
850 #endif
851 #ifdef COH_SUPER_MAGIC
851 #ifdef COH_SUPER_MAGIC
852 if (pbuf->f_type == COH_SUPER_MAGIC)
852 if (pbuf->f_type == COH_SUPER_MAGIC)
853 return "coh";
853 return "coh";
854 #endif
854 #endif
855 #ifdef CRAMFS_MAGIC
855 #ifdef CRAMFS_MAGIC
856 if (pbuf->f_type == CRAMFS_MAGIC)
856 if (pbuf->f_type == CRAMFS_MAGIC)
857 return "cramfs";
857 return "cramfs";
858 #endif
858 #endif
859 #ifdef DEBUGFS_MAGIC
859 #ifdef DEBUGFS_MAGIC
860 if (pbuf->f_type == DEBUGFS_MAGIC)
860 if (pbuf->f_type == DEBUGFS_MAGIC)
861 return "debugfs";
861 return "debugfs";
862 #endif
862 #endif
863 #ifdef DEVFS_SUPER_MAGIC
863 #ifdef DEVFS_SUPER_MAGIC
864 if (pbuf->f_type == DEVFS_SUPER_MAGIC)
864 if (pbuf->f_type == DEVFS_SUPER_MAGIC)
865 return "devfs";
865 return "devfs";
866 #endif
866 #endif
867 #ifdef DEVPTS_SUPER_MAGIC
867 #ifdef DEVPTS_SUPER_MAGIC
868 if (pbuf->f_type == DEVPTS_SUPER_MAGIC)
868 if (pbuf->f_type == DEVPTS_SUPER_MAGIC)
869 return "devpts";
869 return "devpts";
870 #endif
870 #endif
871 #ifdef EFIVARFS_MAGIC
871 #ifdef EFIVARFS_MAGIC
872 if (pbuf->f_type == EFIVARFS_MAGIC)
872 if (pbuf->f_type == EFIVARFS_MAGIC)
873 return "efivarfs";
873 return "efivarfs";
874 #endif
874 #endif
875 #ifdef EFS_SUPER_MAGIC
875 #ifdef EFS_SUPER_MAGIC
876 if (pbuf->f_type == EFS_SUPER_MAGIC)
876 if (pbuf->f_type == EFS_SUPER_MAGIC)
877 return "efs";
877 return "efs";
878 #endif
878 #endif
879 #ifdef EXT_SUPER_MAGIC
879 #ifdef EXT_SUPER_MAGIC
880 if (pbuf->f_type == EXT_SUPER_MAGIC)
880 if (pbuf->f_type == EXT_SUPER_MAGIC)
881 return "ext";
881 return "ext";
882 #endif
882 #endif
883 #ifdef EXT2_OLD_SUPER_MAGIC
883 #ifdef EXT2_OLD_SUPER_MAGIC
884 if (pbuf->f_type == EXT2_OLD_SUPER_MAGIC)
884 if (pbuf->f_type == EXT2_OLD_SUPER_MAGIC)
885 return "ext2";
885 return "ext2";
886 #endif
886 #endif
887 #ifdef EXT2_SUPER_MAGIC
887 #ifdef EXT2_SUPER_MAGIC
888 if (pbuf->f_type == EXT2_SUPER_MAGIC)
888 if (pbuf->f_type == EXT2_SUPER_MAGIC)
889 return "ext2";
889 return "ext2";
890 #endif
890 #endif
891 #ifdef EXT3_SUPER_MAGIC
891 #ifdef EXT3_SUPER_MAGIC
892 if (pbuf->f_type == EXT3_SUPER_MAGIC)
892 if (pbuf->f_type == EXT3_SUPER_MAGIC)
893 return "ext3";
893 return "ext3";
894 #endif
894 #endif
895 #ifdef EXT4_SUPER_MAGIC
895 #ifdef EXT4_SUPER_MAGIC
896 if (pbuf->f_type == EXT4_SUPER_MAGIC)
896 if (pbuf->f_type == EXT4_SUPER_MAGIC)
897 return "ext4";
897 return "ext4";
898 #endif
898 #endif
899 #ifdef F2FS_SUPER_MAGIC
899 #ifdef F2FS_SUPER_MAGIC
900 if (pbuf->f_type == F2FS_SUPER_MAGIC)
900 if (pbuf->f_type == F2FS_SUPER_MAGIC)
901 return "f2fs";
901 return "f2fs";
902 #endif
902 #endif
903 #ifdef FUSE_SUPER_MAGIC
903 #ifdef FUSE_SUPER_MAGIC
904 if (pbuf->f_type == FUSE_SUPER_MAGIC)
904 if (pbuf->f_type == FUSE_SUPER_MAGIC)
905 return "fuse";
905 return "fuse";
906 #endif
906 #endif
907 #ifdef FUTEXFS_SUPER_MAGIC
907 #ifdef FUTEXFS_SUPER_MAGIC
908 if (pbuf->f_type == FUTEXFS_SUPER_MAGIC)
908 if (pbuf->f_type == FUTEXFS_SUPER_MAGIC)
909 return "futexfs";
909 return "futexfs";
910 #endif
910 #endif
911 #ifdef HFS_SUPER_MAGIC
911 #ifdef HFS_SUPER_MAGIC
912 if (pbuf->f_type == HFS_SUPER_MAGIC)
912 if (pbuf->f_type == HFS_SUPER_MAGIC)
913 return "hfs";
913 return "hfs";
914 #endif
914 #endif
915 #ifdef HOSTFS_SUPER_MAGIC
915 #ifdef HOSTFS_SUPER_MAGIC
916 if (pbuf->f_type == HOSTFS_SUPER_MAGIC)
916 if (pbuf->f_type == HOSTFS_SUPER_MAGIC)
917 return "hostfs";
917 return "hostfs";
918 #endif
918 #endif
919 #ifdef HPFS_SUPER_MAGIC
919 #ifdef HPFS_SUPER_MAGIC
920 if (pbuf->f_type == HPFS_SUPER_MAGIC)
920 if (pbuf->f_type == HPFS_SUPER_MAGIC)
921 return "hpfs";
921 return "hpfs";
922 #endif
922 #endif
923 #ifdef HUGETLBFS_MAGIC
923 #ifdef HUGETLBFS_MAGIC
924 if (pbuf->f_type == HUGETLBFS_MAGIC)
924 if (pbuf->f_type == HUGETLBFS_MAGIC)
925 return "hugetlbfs";
925 return "hugetlbfs";
926 #endif
926 #endif
927 #ifdef ISOFS_SUPER_MAGIC
927 #ifdef ISOFS_SUPER_MAGIC
928 if (pbuf->f_type == ISOFS_SUPER_MAGIC)
928 if (pbuf->f_type == ISOFS_SUPER_MAGIC)
929 return "isofs";
929 return "isofs";
930 #endif
930 #endif
931 #ifdef JFFS2_SUPER_MAGIC
931 #ifdef JFFS2_SUPER_MAGIC
932 if (pbuf->f_type == JFFS2_SUPER_MAGIC)
932 if (pbuf->f_type == JFFS2_SUPER_MAGIC)
933 return "jffs2";
933 return "jffs2";
934 #endif
934 #endif
935 #ifdef JFS_SUPER_MAGIC
935 #ifdef JFS_SUPER_MAGIC
936 if (pbuf->f_type == JFS_SUPER_MAGIC)
936 if (pbuf->f_type == JFS_SUPER_MAGIC)
937 return "jfs";
937 return "jfs";
938 #endif
938 #endif
939 #ifdef MINIX_SUPER_MAGIC
939 #ifdef MINIX_SUPER_MAGIC
940 if (pbuf->f_type == MINIX_SUPER_MAGIC)
940 if (pbuf->f_type == MINIX_SUPER_MAGIC)
941 return "minix";
941 return "minix";
942 #endif
942 #endif
943 #ifdef MINIX2_SUPER_MAGIC
943 #ifdef MINIX2_SUPER_MAGIC
944 if (pbuf->f_type == MINIX2_SUPER_MAGIC)
944 if (pbuf->f_type == MINIX2_SUPER_MAGIC)
945 return "minix2";
945 return "minix2";
946 #endif
946 #endif
947 #ifdef MINIX3_SUPER_MAGIC
947 #ifdef MINIX3_SUPER_MAGIC
948 if (pbuf->f_type == MINIX3_SUPER_MAGIC)
948 if (pbuf->f_type == MINIX3_SUPER_MAGIC)
949 return "minix3";
949 return "minix3";
950 #endif
950 #endif
951 #ifdef MQUEUE_MAGIC
951 #ifdef MQUEUE_MAGIC
952 if (pbuf->f_type == MQUEUE_MAGIC)
952 if (pbuf->f_type == MQUEUE_MAGIC)
953 return "mqueue";
953 return "mqueue";
954 #endif
954 #endif
955 #ifdef MSDOS_SUPER_MAGIC
955 #ifdef MSDOS_SUPER_MAGIC
956 if (pbuf->f_type == MSDOS_SUPER_MAGIC)
956 if (pbuf->f_type == MSDOS_SUPER_MAGIC)
957 return "msdos";
957 return "msdos";
958 #endif
958 #endif
959 #ifdef NCP_SUPER_MAGIC
959 #ifdef NCP_SUPER_MAGIC
960 if (pbuf->f_type == NCP_SUPER_MAGIC)
960 if (pbuf->f_type == NCP_SUPER_MAGIC)
961 return "ncp";
961 return "ncp";
962 #endif
962 #endif
963 #ifdef NFS_SUPER_MAGIC
963 #ifdef NFS_SUPER_MAGIC
964 if (pbuf->f_type == NFS_SUPER_MAGIC)
964 if (pbuf->f_type == NFS_SUPER_MAGIC)
965 return "nfs";
965 return "nfs";
966 #endif
966 #endif
967 #ifdef NILFS_SUPER_MAGIC
967 #ifdef NILFS_SUPER_MAGIC
968 if (pbuf->f_type == NILFS_SUPER_MAGIC)
968 if (pbuf->f_type == NILFS_SUPER_MAGIC)
969 return "nilfs";
969 return "nilfs";
970 #endif
970 #endif
971 #ifdef NTFS_SB_MAGIC
971 #ifdef NTFS_SB_MAGIC
972 if (pbuf->f_type == NTFS_SB_MAGIC)
972 if (pbuf->f_type == NTFS_SB_MAGIC)
973 return "ntfs-sb";
973 return "ntfs-sb";
974 #endif
974 #endif
975 #ifdef OCFS2_SUPER_MAGIC
975 #ifdef OCFS2_SUPER_MAGIC
976 if (pbuf->f_type == OCFS2_SUPER_MAGIC)
976 if (pbuf->f_type == OCFS2_SUPER_MAGIC)
977 return "ocfs2";
977 return "ocfs2";
978 #endif
978 #endif
979 #ifdef OPENPROM_SUPER_MAGIC
979 #ifdef OPENPROM_SUPER_MAGIC
980 if (pbuf->f_type == OPENPROM_SUPER_MAGIC)
980 if (pbuf->f_type == OPENPROM_SUPER_MAGIC)
981 return "openprom";
981 return "openprom";
982 #endif
982 #endif
983 #ifdef OVERLAYFS_SUPER_MAGIC
983 #ifdef OVERLAYFS_SUPER_MAGIC
984 if (pbuf->f_type == OVERLAYFS_SUPER_MAGIC)
984 if (pbuf->f_type == OVERLAYFS_SUPER_MAGIC)
985 return "overlay";
985 return "overlay";
986 #endif
986 #endif
987 #ifdef PIPEFS_MAGIC
987 #ifdef PIPEFS_MAGIC
988 if (pbuf->f_type == PIPEFS_MAGIC)
988 if (pbuf->f_type == PIPEFS_MAGIC)
989 return "pipefs";
989 return "pipefs";
990 #endif
990 #endif
991 #ifdef PROC_SUPER_MAGIC
991 #ifdef PROC_SUPER_MAGIC
992 if (pbuf->f_type == PROC_SUPER_MAGIC)
992 if (pbuf->f_type == PROC_SUPER_MAGIC)
993 return "proc";
993 return "proc";
994 #endif
994 #endif
995 #ifdef PSTOREFS_MAGIC
995 #ifdef PSTOREFS_MAGIC
996 if (pbuf->f_type == PSTOREFS_MAGIC)
996 if (pbuf->f_type == PSTOREFS_MAGIC)
997 return "pstorefs";
997 return "pstorefs";
998 #endif
998 #endif
999 #ifdef QNX4_SUPER_MAGIC
999 #ifdef QNX4_SUPER_MAGIC
1000 if (pbuf->f_type == QNX4_SUPER_MAGIC)
1000 if (pbuf->f_type == QNX4_SUPER_MAGIC)
1001 return "qnx4";
1001 return "qnx4";
1002 #endif
1002 #endif
1003 #ifdef QNX6_SUPER_MAGIC
1003 #ifdef QNX6_SUPER_MAGIC
1004 if (pbuf->f_type == QNX6_SUPER_MAGIC)
1004 if (pbuf->f_type == QNX6_SUPER_MAGIC)
1005 return "qnx6";
1005 return "qnx6";
1006 #endif
1006 #endif
1007 #ifdef RAMFS_MAGIC
1007 #ifdef RAMFS_MAGIC
1008 if (pbuf->f_type == RAMFS_MAGIC)
1008 if (pbuf->f_type == RAMFS_MAGIC)
1009 return "ramfs";
1009 return "ramfs";
1010 #endif
1010 #endif
1011 #ifdef REISERFS_SUPER_MAGIC
1011 #ifdef REISERFS_SUPER_MAGIC
1012 if (pbuf->f_type == REISERFS_SUPER_MAGIC)
1012 if (pbuf->f_type == REISERFS_SUPER_MAGIC)
1013 return "reiserfs";
1013 return "reiserfs";
1014 #endif
1014 #endif
1015 #ifdef ROMFS_MAGIC
1015 #ifdef ROMFS_MAGIC
1016 if (pbuf->f_type == ROMFS_MAGIC)
1016 if (pbuf->f_type == ROMFS_MAGIC)
1017 return "romfs";
1017 return "romfs";
1018 #endif
1018 #endif
1019 #ifdef SECURITYFS_MAGIC
1019 #ifdef SECURITYFS_MAGIC
1020 if (pbuf->f_type == SECURITYFS_MAGIC)
1020 if (pbuf->f_type == SECURITYFS_MAGIC)
1021 return "securityfs";
1021 return "securityfs";
1022 #endif
1022 #endif
1023 #ifdef SELINUX_MAGIC
1023 #ifdef SELINUX_MAGIC
1024 if (pbuf->f_type == SELINUX_MAGIC)
1024 if (pbuf->f_type == SELINUX_MAGIC)
1025 return "selinux";
1025 return "selinux";
1026 #endif
1026 #endif
1027 #ifdef SMACK_MAGIC
1027 #ifdef SMACK_MAGIC
1028 if (pbuf->f_type == SMACK_MAGIC)
1028 if (pbuf->f_type == SMACK_MAGIC)
1029 return "smack";
1029 return "smack";
1030 #endif
1030 #endif
1031 #ifdef SMB_SUPER_MAGIC
1031 #ifdef SMB_SUPER_MAGIC
1032 if (pbuf->f_type == SMB_SUPER_MAGIC)
1032 if (pbuf->f_type == SMB_SUPER_MAGIC)
1033 return "smb";
1033 return "smb";
1034 #endif
1034 #endif
1035 #ifdef SOCKFS_MAGIC
1035 #ifdef SOCKFS_MAGIC
1036 if (pbuf->f_type == SOCKFS_MAGIC)
1036 if (pbuf->f_type == SOCKFS_MAGIC)
1037 return "sockfs";
1037 return "sockfs";
1038 #endif
1038 #endif
1039 #ifdef SQUASHFS_MAGIC
1039 #ifdef SQUASHFS_MAGIC
1040 if (pbuf->f_type == SQUASHFS_MAGIC)
1040 if (pbuf->f_type == SQUASHFS_MAGIC)
1041 return "squashfs";
1041 return "squashfs";
1042 #endif
1042 #endif
1043 #ifdef SYSFS_MAGIC
1043 #ifdef SYSFS_MAGIC
1044 if (pbuf->f_type == SYSFS_MAGIC)
1044 if (pbuf->f_type == SYSFS_MAGIC)
1045 return "sysfs";
1045 return "sysfs";
1046 #endif
1046 #endif
1047 #ifdef SYSV2_SUPER_MAGIC
1047 #ifdef SYSV2_SUPER_MAGIC
1048 if (pbuf->f_type == SYSV2_SUPER_MAGIC)
1048 if (pbuf->f_type == SYSV2_SUPER_MAGIC)
1049 return "sysv2";
1049 return "sysv2";
1050 #endif
1050 #endif
1051 #ifdef SYSV4_SUPER_MAGIC
1051 #ifdef SYSV4_SUPER_MAGIC
1052 if (pbuf->f_type == SYSV4_SUPER_MAGIC)
1052 if (pbuf->f_type == SYSV4_SUPER_MAGIC)
1053 return "sysv4";
1053 return "sysv4";
1054 #endif
1054 #endif
1055 #ifdef TMPFS_MAGIC
1055 #ifdef TMPFS_MAGIC
1056 if (pbuf->f_type == TMPFS_MAGIC)
1056 if (pbuf->f_type == TMPFS_MAGIC)
1057 return "tmpfs";
1057 return "tmpfs";
1058 #endif
1058 #endif
1059 #ifdef UDF_SUPER_MAGIC
1059 #ifdef UDF_SUPER_MAGIC
1060 if (pbuf->f_type == UDF_SUPER_MAGIC)
1060 if (pbuf->f_type == UDF_SUPER_MAGIC)
1061 return "udf";
1061 return "udf";
1062 #endif
1062 #endif
1063 #ifdef UFS_MAGIC
1063 #ifdef UFS_MAGIC
1064 if (pbuf->f_type == UFS_MAGIC)
1064 if (pbuf->f_type == UFS_MAGIC)
1065 return "ufs";
1065 return "ufs";
1066 #endif
1066 #endif
1067 #ifdef USBDEVICE_SUPER_MAGIC
1067 #ifdef USBDEVICE_SUPER_MAGIC
1068 if (pbuf->f_type == USBDEVICE_SUPER_MAGIC)
1068 if (pbuf->f_type == USBDEVICE_SUPER_MAGIC)
1069 return "usbdevice";
1069 return "usbdevice";
1070 #endif
1070 #endif
1071 #ifdef V9FS_MAGIC
1071 #ifdef V9FS_MAGIC
1072 if (pbuf->f_type == V9FS_MAGIC)
1072 if (pbuf->f_type == V9FS_MAGIC)
1073 return "v9fs";
1073 return "v9fs";
1074 #endif
1074 #endif
1075 #ifdef VXFS_SUPER_MAGIC
1075 #ifdef VXFS_SUPER_MAGIC
1076 if (pbuf->f_type == VXFS_SUPER_MAGIC)
1076 if (pbuf->f_type == VXFS_SUPER_MAGIC)
1077 return "vxfs";
1077 return "vxfs";
1078 #endif
1078 #endif
1079 #ifdef XENFS_SUPER_MAGIC
1079 #ifdef XENFS_SUPER_MAGIC
1080 if (pbuf->f_type == XENFS_SUPER_MAGIC)
1080 if (pbuf->f_type == XENFS_SUPER_MAGIC)
1081 return "xenfs";
1081 return "xenfs";
1082 #endif
1082 #endif
1083 #ifdef XENIX_SUPER_MAGIC
1083 #ifdef XENIX_SUPER_MAGIC
1084 if (pbuf->f_type == XENIX_SUPER_MAGIC)
1084 if (pbuf->f_type == XENIX_SUPER_MAGIC)
1085 return "xenix";
1085 return "xenix";
1086 #endif
1086 #endif
1087 #ifdef XFS_SUPER_MAGIC
1087 #ifdef XFS_SUPER_MAGIC
1088 if (pbuf->f_type == XFS_SUPER_MAGIC)
1088 if (pbuf->f_type == XFS_SUPER_MAGIC)
1089 return "xfs";
1089 return "xfs";
1090 #endif
1090 #endif
1091 /* End of Linux filesystems */
1091 /* End of Linux filesystems */
1092 return NULL;
1092 return NULL;
1093 }
1093 }
1094 #endif /* def HAVE_LINUX_STATFS */
1094 #endif /* def HAVE_LINUX_STATFS */
1095
1095
1096 #if defined(HAVE_BSD_STATFS) || defined(HAVE_LINUX_STATFS)
1096 #if defined(HAVE_BSD_STATFS) || defined(HAVE_LINUX_STATFS)
1097 /* given a directory path, return filesystem type name (best-effort) */
1097 /* given a directory path, return filesystem type name (best-effort) */
1098 static PyObject *getfstype(PyObject *self, PyObject *args)
1098 static PyObject *getfstype(PyObject *self, PyObject *args)
1099 {
1099 {
1100 const char *path = NULL;
1100 const char *path = NULL;
1101 struct statfs buf;
1101 struct statfs buf;
1102 int r;
1102 int r;
1103 if (!PyArg_ParseTuple(args, "s", &path))
1103 if (!PyArg_ParseTuple(args, "s", &path))
1104 return NULL;
1104 return NULL;
1105
1105
1106 memset(&buf, 0, sizeof(buf));
1106 memset(&buf, 0, sizeof(buf));
1107 r = statfs(path, &buf);
1107 r = statfs(path, &buf);
1108 if (r != 0)
1108 if (r != 0)
1109 Py_RETURN_NONE;
1109 return PyErr_SetFromErrno(PyExc_OSError);
1110 return Py_BuildValue("s", describefstype(&buf));
1110 return Py_BuildValue("s", describefstype(&buf));
1111 }
1111 }
1112 #endif /* defined(HAVE_LINUX_STATFS) || defined(HAVE_BSD_STATFS) */
1112 #endif /* defined(HAVE_LINUX_STATFS) || defined(HAVE_BSD_STATFS) */
1113
1113
1114 #endif /* ndef _WIN32 */
1114 #endif /* ndef _WIN32 */
1115
1115
1116 static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs)
1116 static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs)
1117 {
1117 {
1118 PyObject *statobj = NULL; /* initialize - optional arg */
1118 PyObject *statobj = NULL; /* initialize - optional arg */
1119 PyObject *skipobj = NULL; /* initialize - optional arg */
1119 PyObject *skipobj = NULL; /* initialize - optional arg */
1120 char *path, *skip = NULL;
1120 char *path, *skip = NULL;
1121 int wantstat, plen;
1121 int wantstat, plen;
1122
1122
1123 static char *kwlist[] = {"path", "stat", "skip", NULL};
1123 static char *kwlist[] = {"path", "stat", "skip", NULL};
1124
1124
1125 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|OO:listdir",
1125 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|OO:listdir",
1126 kwlist, &path, &plen, &statobj, &skipobj))
1126 kwlist, &path, &plen, &statobj, &skipobj))
1127 return NULL;
1127 return NULL;
1128
1128
1129 wantstat = statobj && PyObject_IsTrue(statobj);
1129 wantstat = statobj && PyObject_IsTrue(statobj);
1130
1130
1131 if (skipobj && skipobj != Py_None) {
1131 if (skipobj && skipobj != Py_None) {
1132 skip = PyBytes_AsString(skipobj);
1132 skip = PyBytes_AsString(skipobj);
1133 if (!skip)
1133 if (!skip)
1134 return NULL;
1134 return NULL;
1135 }
1135 }
1136
1136
1137 return _listdir(path, plen, wantstat, skip);
1137 return _listdir(path, plen, wantstat, skip);
1138 }
1138 }
1139
1139
1140 #ifdef _WIN32
1140 #ifdef _WIN32
1141 static PyObject *posixfile(PyObject *self, PyObject *args, PyObject *kwds)
1141 static PyObject *posixfile(PyObject *self, PyObject *args, PyObject *kwds)
1142 {
1142 {
1143 static char *kwlist[] = {"name", "mode", "buffering", NULL};
1143 static char *kwlist[] = {"name", "mode", "buffering", NULL};
1144 PyObject *file_obj = NULL;
1144 PyObject *file_obj = NULL;
1145 char *name = NULL;
1145 char *name = NULL;
1146 char *mode = "rb";
1146 char *mode = "rb";
1147 DWORD access = 0;
1147 DWORD access = 0;
1148 DWORD creation;
1148 DWORD creation;
1149 HANDLE handle;
1149 HANDLE handle;
1150 int fd, flags = 0;
1150 int fd, flags = 0;
1151 int bufsize = -1;
1151 int bufsize = -1;
1152 char m0, m1, m2;
1152 char m0, m1, m2;
1153 char fpmode[4];
1153 char fpmode[4];
1154 int fppos = 0;
1154 int fppos = 0;
1155 int plus;
1155 int plus;
1156 FILE *fp;
1156 FILE *fp;
1157
1157
1158 if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|si:posixfile", kwlist,
1158 if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|si:posixfile", kwlist,
1159 Py_FileSystemDefaultEncoding,
1159 Py_FileSystemDefaultEncoding,
1160 &name, &mode, &bufsize))
1160 &name, &mode, &bufsize))
1161 return NULL;
1161 return NULL;
1162
1162
1163 m0 = mode[0];
1163 m0 = mode[0];
1164 m1 = m0 ? mode[1] : '\0';
1164 m1 = m0 ? mode[1] : '\0';
1165 m2 = m1 ? mode[2] : '\0';
1165 m2 = m1 ? mode[2] : '\0';
1166 plus = m1 == '+' || m2 == '+';
1166 plus = m1 == '+' || m2 == '+';
1167
1167
1168 fpmode[fppos++] = m0;
1168 fpmode[fppos++] = m0;
1169 if (m1 == 'b' || m2 == 'b') {
1169 if (m1 == 'b' || m2 == 'b') {
1170 flags = _O_BINARY;
1170 flags = _O_BINARY;
1171 fpmode[fppos++] = 'b';
1171 fpmode[fppos++] = 'b';
1172 }
1172 }
1173 else
1173 else
1174 flags = _O_TEXT;
1174 flags = _O_TEXT;
1175 if (m0 == 'r' && !plus) {
1175 if (m0 == 'r' && !plus) {
1176 flags |= _O_RDONLY;
1176 flags |= _O_RDONLY;
1177 access = GENERIC_READ;
1177 access = GENERIC_READ;
1178 } else {
1178 } else {
1179 /*
1179 /*
1180 work around http://support.microsoft.com/kb/899149 and
1180 work around http://support.microsoft.com/kb/899149 and
1181 set _O_RDWR for 'w' and 'a', even if mode has no '+'
1181 set _O_RDWR for 'w' and 'a', even if mode has no '+'
1182 */
1182 */
1183 flags |= _O_RDWR;
1183 flags |= _O_RDWR;
1184 access = GENERIC_READ | GENERIC_WRITE;
1184 access = GENERIC_READ | GENERIC_WRITE;
1185 fpmode[fppos++] = '+';
1185 fpmode[fppos++] = '+';
1186 }
1186 }
1187 fpmode[fppos++] = '\0';
1187 fpmode[fppos++] = '\0';
1188
1188
1189 switch (m0) {
1189 switch (m0) {
1190 case 'r':
1190 case 'r':
1191 creation = OPEN_EXISTING;
1191 creation = OPEN_EXISTING;
1192 break;
1192 break;
1193 case 'w':
1193 case 'w':
1194 creation = CREATE_ALWAYS;
1194 creation = CREATE_ALWAYS;
1195 break;
1195 break;
1196 case 'a':
1196 case 'a':
1197 creation = OPEN_ALWAYS;
1197 creation = OPEN_ALWAYS;
1198 flags |= _O_APPEND;
1198 flags |= _O_APPEND;
1199 break;
1199 break;
1200 default:
1200 default:
1201 PyErr_Format(PyExc_ValueError,
1201 PyErr_Format(PyExc_ValueError,
1202 "mode string must begin with one of 'r', 'w', "
1202 "mode string must begin with one of 'r', 'w', "
1203 "or 'a', not '%c'", m0);
1203 "or 'a', not '%c'", m0);
1204 goto bail;
1204 goto bail;
1205 }
1205 }
1206
1206
1207 handle = CreateFile(name, access,
1207 handle = CreateFile(name, access,
1208 FILE_SHARE_READ | FILE_SHARE_WRITE |
1208 FILE_SHARE_READ | FILE_SHARE_WRITE |
1209 FILE_SHARE_DELETE,
1209 FILE_SHARE_DELETE,
1210 NULL,
1210 NULL,
1211 creation,
1211 creation,
1212 FILE_ATTRIBUTE_NORMAL,
1212 FILE_ATTRIBUTE_NORMAL,
1213 0);
1213 0);
1214
1214
1215 if (handle == INVALID_HANDLE_VALUE) {
1215 if (handle == INVALID_HANDLE_VALUE) {
1216 PyErr_SetFromWindowsErrWithFilename(GetLastError(), name);
1216 PyErr_SetFromWindowsErrWithFilename(GetLastError(), name);
1217 goto bail;
1217 goto bail;
1218 }
1218 }
1219
1219
1220 fd = _open_osfhandle((intptr_t)handle, flags);
1220 fd = _open_osfhandle((intptr_t)handle, flags);
1221
1221
1222 if (fd == -1) {
1222 if (fd == -1) {
1223 CloseHandle(handle);
1223 CloseHandle(handle);
1224 PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
1224 PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
1225 goto bail;
1225 goto bail;
1226 }
1226 }
1227 #ifndef IS_PY3K
1227 #ifndef IS_PY3K
1228 fp = _fdopen(fd, fpmode);
1228 fp = _fdopen(fd, fpmode);
1229 if (fp == NULL) {
1229 if (fp == NULL) {
1230 _close(fd);
1230 _close(fd);
1231 PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
1231 PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
1232 goto bail;
1232 goto bail;
1233 }
1233 }
1234
1234
1235 file_obj = PyFile_FromFile(fp, name, mode, fclose);
1235 file_obj = PyFile_FromFile(fp, name, mode, fclose);
1236 if (file_obj == NULL) {
1236 if (file_obj == NULL) {
1237 fclose(fp);
1237 fclose(fp);
1238 goto bail;
1238 goto bail;
1239 }
1239 }
1240
1240
1241 PyFile_SetBufSize(file_obj, bufsize);
1241 PyFile_SetBufSize(file_obj, bufsize);
1242 #else
1242 #else
1243 file_obj = PyFile_FromFd(fd, name, mode, bufsize, NULL, NULL, NULL, 1);
1243 file_obj = PyFile_FromFd(fd, name, mode, bufsize, NULL, NULL, NULL, 1);
1244 if (file_obj == NULL)
1244 if (file_obj == NULL)
1245 goto bail;
1245 goto bail;
1246 #endif
1246 #endif
1247 bail:
1247 bail:
1248 PyMem_Free(name);
1248 PyMem_Free(name);
1249 return file_obj;
1249 return file_obj;
1250 }
1250 }
1251 #endif
1251 #endif
1252
1252
1253 #ifdef __APPLE__
1253 #ifdef __APPLE__
1254 #include <ApplicationServices/ApplicationServices.h>
1254 #include <ApplicationServices/ApplicationServices.h>
1255
1255
1256 static PyObject *isgui(PyObject *self)
1256 static PyObject *isgui(PyObject *self)
1257 {
1257 {
1258 CFDictionaryRef dict = CGSessionCopyCurrentDictionary();
1258 CFDictionaryRef dict = CGSessionCopyCurrentDictionary();
1259
1259
1260 if (dict != NULL) {
1260 if (dict != NULL) {
1261 CFRelease(dict);
1261 CFRelease(dict);
1262 Py_RETURN_TRUE;
1262 Py_RETURN_TRUE;
1263 } else {
1263 } else {
1264 Py_RETURN_FALSE;
1264 Py_RETURN_FALSE;
1265 }
1265 }
1266 }
1266 }
1267 #endif
1267 #endif
1268
1268
1269 static char osutil_doc[] = "Native operating system services.";
1269 static char osutil_doc[] = "Native operating system services.";
1270
1270
1271 static PyMethodDef methods[] = {
1271 static PyMethodDef methods[] = {
1272 {"listdir", (PyCFunction)listdir, METH_VARARGS | METH_KEYWORDS,
1272 {"listdir", (PyCFunction)listdir, METH_VARARGS | METH_KEYWORDS,
1273 "list a directory\n"},
1273 "list a directory\n"},
1274 #ifdef _WIN32
1274 #ifdef _WIN32
1275 {"posixfile", (PyCFunction)posixfile, METH_VARARGS | METH_KEYWORDS,
1275 {"posixfile", (PyCFunction)posixfile, METH_VARARGS | METH_KEYWORDS,
1276 "Open a file with POSIX-like semantics.\n"
1276 "Open a file with POSIX-like semantics.\n"
1277 "On error, this function may raise either a WindowsError or an IOError."},
1277 "On error, this function may raise either a WindowsError or an IOError."},
1278 #else
1278 #else
1279 {"statfiles", (PyCFunction)statfiles, METH_VARARGS | METH_KEYWORDS,
1279 {"statfiles", (PyCFunction)statfiles, METH_VARARGS | METH_KEYWORDS,
1280 "stat a series of files or symlinks\n"
1280 "stat a series of files or symlinks\n"
1281 "Returns None for non-existent entries and entries of other types.\n"},
1281 "Returns None for non-existent entries and entries of other types.\n"},
1282 #ifdef CMSG_LEN
1282 #ifdef CMSG_LEN
1283 {"recvfds", (PyCFunction)recvfds, METH_VARARGS,
1283 {"recvfds", (PyCFunction)recvfds, METH_VARARGS,
1284 "receive list of file descriptors via socket\n"},
1284 "receive list of file descriptors via socket\n"},
1285 #endif
1285 #endif
1286 #ifndef SETPROCNAME_USE_NONE
1286 #ifndef SETPROCNAME_USE_NONE
1287 {"setprocname", (PyCFunction)setprocname, METH_VARARGS,
1287 {"setprocname", (PyCFunction)setprocname, METH_VARARGS,
1288 "set process title (best-effort)\n"},
1288 "set process title (best-effort)\n"},
1289 #endif
1289 #endif
1290 #if defined(HAVE_BSD_STATFS) || defined(HAVE_LINUX_STATFS)
1290 #if defined(HAVE_BSD_STATFS) || defined(HAVE_LINUX_STATFS)
1291 {"getfstype", (PyCFunction)getfstype, METH_VARARGS,
1291 {"getfstype", (PyCFunction)getfstype, METH_VARARGS,
1292 "get filesystem type (best-effort)\n"},
1292 "get filesystem type (best-effort)\n"},
1293 #endif
1293 #endif
1294 #endif /* ndef _WIN32 */
1294 #endif /* ndef _WIN32 */
1295 #ifdef __APPLE__
1295 #ifdef __APPLE__
1296 {
1296 {
1297 "isgui", (PyCFunction)isgui, METH_NOARGS,
1297 "isgui", (PyCFunction)isgui, METH_NOARGS,
1298 "Is a CoreGraphics session available?"
1298 "Is a CoreGraphics session available?"
1299 },
1299 },
1300 #endif
1300 #endif
1301 {NULL, NULL}
1301 {NULL, NULL}
1302 };
1302 };
1303
1303
1304 #ifdef IS_PY3K
1304 #ifdef IS_PY3K
1305 static struct PyModuleDef osutil_module = {
1305 static struct PyModuleDef osutil_module = {
1306 PyModuleDef_HEAD_INIT,
1306 PyModuleDef_HEAD_INIT,
1307 "osutil",
1307 "osutil",
1308 osutil_doc,
1308 osutil_doc,
1309 -1,
1309 -1,
1310 methods
1310 methods
1311 };
1311 };
1312
1312
1313 PyMODINIT_FUNC PyInit_osutil(void)
1313 PyMODINIT_FUNC PyInit_osutil(void)
1314 {
1314 {
1315 if (PyType_Ready(&listdir_stat_type) < 0)
1315 if (PyType_Ready(&listdir_stat_type) < 0)
1316 return NULL;
1316 return NULL;
1317
1317
1318 return PyModule_Create(&osutil_module);
1318 return PyModule_Create(&osutil_module);
1319 }
1319 }
1320 #else
1320 #else
1321 PyMODINIT_FUNC initosutil(void)
1321 PyMODINIT_FUNC initosutil(void)
1322 {
1322 {
1323 if (PyType_Ready(&listdir_stat_type) == -1)
1323 if (PyType_Ready(&listdir_stat_type) == -1)
1324 return;
1324 return;
1325
1325
1326 Py_InitModule3("osutil", methods, osutil_doc);
1326 Py_InitModule3("osutil", methods, osutil_doc);
1327 }
1327 }
1328 #endif
1328 #endif
@@ -1,3630 +1,3633
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import codecs
20 import codecs
21 import collections
21 import collections
22 import datetime
22 import datetime
23 import errno
23 import errno
24 import gc
24 import gc
25 import hashlib
25 import hashlib
26 import imp
26 import imp
27 import os
27 import os
28 import platform as pyplatform
28 import platform as pyplatform
29 import re as remod
29 import re as remod
30 import shutil
30 import shutil
31 import signal
31 import signal
32 import socket
32 import socket
33 import stat
33 import stat
34 import string
34 import string
35 import subprocess
35 import subprocess
36 import sys
36 import sys
37 import tempfile
37 import tempfile
38 import textwrap
38 import textwrap
39 import time
39 import time
40 import traceback
40 import traceback
41 import zlib
41 import zlib
42
42
43 from . import (
43 from . import (
44 encoding,
44 encoding,
45 error,
45 error,
46 i18n,
46 i18n,
47 osutil,
47 osutil,
48 parsers,
48 parsers,
49 pycompat,
49 pycompat,
50 )
50 )
51
51
52 empty = pycompat.empty
52 empty = pycompat.empty
53 httplib = pycompat.httplib
53 httplib = pycompat.httplib
54 httpserver = pycompat.httpserver
54 httpserver = pycompat.httpserver
55 pickle = pycompat.pickle
55 pickle = pycompat.pickle
56 queue = pycompat.queue
56 queue = pycompat.queue
57 socketserver = pycompat.socketserver
57 socketserver = pycompat.socketserver
58 stderr = pycompat.stderr
58 stderr = pycompat.stderr
59 stdin = pycompat.stdin
59 stdin = pycompat.stdin
60 stdout = pycompat.stdout
60 stdout = pycompat.stdout
61 stringio = pycompat.stringio
61 stringio = pycompat.stringio
62 urlerr = pycompat.urlerr
62 urlerr = pycompat.urlerr
63 urlreq = pycompat.urlreq
63 urlreq = pycompat.urlreq
64 xmlrpclib = pycompat.xmlrpclib
64 xmlrpclib = pycompat.xmlrpclib
65
65
66 def isatty(fp):
66 def isatty(fp):
67 try:
67 try:
68 return fp.isatty()
68 return fp.isatty()
69 except AttributeError:
69 except AttributeError:
70 return False
70 return False
71
71
72 # glibc determines buffering on first write to stdout - if we replace a TTY
72 # glibc determines buffering on first write to stdout - if we replace a TTY
73 # destined stdout with a pipe destined stdout (e.g. pager), we want line
73 # destined stdout with a pipe destined stdout (e.g. pager), we want line
74 # buffering
74 # buffering
75 if isatty(stdout):
75 if isatty(stdout):
76 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
76 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
77
77
78 if pycompat.osname == 'nt':
78 if pycompat.osname == 'nt':
79 from . import windows as platform
79 from . import windows as platform
80 stdout = platform.winstdout(stdout)
80 stdout = platform.winstdout(stdout)
81 else:
81 else:
82 from . import posix as platform
82 from . import posix as platform
83
83
84 _ = i18n._
84 _ = i18n._
85
85
86 bindunixsocket = platform.bindunixsocket
86 bindunixsocket = platform.bindunixsocket
87 cachestat = platform.cachestat
87 cachestat = platform.cachestat
88 checkexec = platform.checkexec
88 checkexec = platform.checkexec
89 checklink = platform.checklink
89 checklink = platform.checklink
90 copymode = platform.copymode
90 copymode = platform.copymode
91 executablepath = platform.executablepath
91 executablepath = platform.executablepath
92 expandglobs = platform.expandglobs
92 expandglobs = platform.expandglobs
93 explainexit = platform.explainexit
93 explainexit = platform.explainexit
94 findexe = platform.findexe
94 findexe = platform.findexe
95 gethgcmd = platform.gethgcmd
95 gethgcmd = platform.gethgcmd
96 getuser = platform.getuser
96 getuser = platform.getuser
97 getpid = os.getpid
97 getpid = os.getpid
98 groupmembers = platform.groupmembers
98 groupmembers = platform.groupmembers
99 groupname = platform.groupname
99 groupname = platform.groupname
100 hidewindow = platform.hidewindow
100 hidewindow = platform.hidewindow
101 isexec = platform.isexec
101 isexec = platform.isexec
102 isowner = platform.isowner
102 isowner = platform.isowner
103 localpath = platform.localpath
103 localpath = platform.localpath
104 lookupreg = platform.lookupreg
104 lookupreg = platform.lookupreg
105 makedir = platform.makedir
105 makedir = platform.makedir
106 nlinks = platform.nlinks
106 nlinks = platform.nlinks
107 normpath = platform.normpath
107 normpath = platform.normpath
108 normcase = platform.normcase
108 normcase = platform.normcase
109 normcasespec = platform.normcasespec
109 normcasespec = platform.normcasespec
110 normcasefallback = platform.normcasefallback
110 normcasefallback = platform.normcasefallback
111 openhardlinks = platform.openhardlinks
111 openhardlinks = platform.openhardlinks
112 oslink = platform.oslink
112 oslink = platform.oslink
113 parsepatchoutput = platform.parsepatchoutput
113 parsepatchoutput = platform.parsepatchoutput
114 pconvert = platform.pconvert
114 pconvert = platform.pconvert
115 poll = platform.poll
115 poll = platform.poll
116 popen = platform.popen
116 popen = platform.popen
117 posixfile = platform.posixfile
117 posixfile = platform.posixfile
118 quotecommand = platform.quotecommand
118 quotecommand = platform.quotecommand
119 readpipe = platform.readpipe
119 readpipe = platform.readpipe
120 rename = platform.rename
120 rename = platform.rename
121 removedirs = platform.removedirs
121 removedirs = platform.removedirs
122 samedevice = platform.samedevice
122 samedevice = platform.samedevice
123 samefile = platform.samefile
123 samefile = platform.samefile
124 samestat = platform.samestat
124 samestat = platform.samestat
125 setbinary = platform.setbinary
125 setbinary = platform.setbinary
126 setflags = platform.setflags
126 setflags = platform.setflags
127 setsignalhandler = platform.setsignalhandler
127 setsignalhandler = platform.setsignalhandler
128 shellquote = platform.shellquote
128 shellquote = platform.shellquote
129 spawndetached = platform.spawndetached
129 spawndetached = platform.spawndetached
130 split = platform.split
130 split = platform.split
131 sshargs = platform.sshargs
131 sshargs = platform.sshargs
132 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
132 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
133 statisexec = platform.statisexec
133 statisexec = platform.statisexec
134 statislink = platform.statislink
134 statislink = platform.statislink
135 testpid = platform.testpid
135 testpid = platform.testpid
136 umask = platform.umask
136 umask = platform.umask
137 unlink = platform.unlink
137 unlink = platform.unlink
138 username = platform.username
138 username = platform.username
139
139
140 # Python compatibility
140 # Python compatibility
141
141
142 _notset = object()
142 _notset = object()
143
143
144 # disable Python's problematic floating point timestamps (issue4836)
144 # disable Python's problematic floating point timestamps (issue4836)
145 # (Python hypocritically says you shouldn't change this behavior in
145 # (Python hypocritically says you shouldn't change this behavior in
146 # libraries, and sure enough Mercurial is not a library.)
146 # libraries, and sure enough Mercurial is not a library.)
147 os.stat_float_times(False)
147 os.stat_float_times(False)
148
148
149 def safehasattr(thing, attr):
149 def safehasattr(thing, attr):
150 return getattr(thing, attr, _notset) is not _notset
150 return getattr(thing, attr, _notset) is not _notset
151
151
152 def bitsfrom(container):
152 def bitsfrom(container):
153 bits = 0
153 bits = 0
154 for bit in container:
154 for bit in container:
155 bits |= bit
155 bits |= bit
156 return bits
156 return bits
157
157
158 DIGESTS = {
158 DIGESTS = {
159 'md5': hashlib.md5,
159 'md5': hashlib.md5,
160 'sha1': hashlib.sha1,
160 'sha1': hashlib.sha1,
161 'sha512': hashlib.sha512,
161 'sha512': hashlib.sha512,
162 }
162 }
163 # List of digest types from strongest to weakest
163 # List of digest types from strongest to weakest
164 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
164 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
165
165
166 for k in DIGESTS_BY_STRENGTH:
166 for k in DIGESTS_BY_STRENGTH:
167 assert k in DIGESTS
167 assert k in DIGESTS
168
168
169 class digester(object):
169 class digester(object):
170 """helper to compute digests.
170 """helper to compute digests.
171
171
172 This helper can be used to compute one or more digests given their name.
172 This helper can be used to compute one or more digests given their name.
173
173
174 >>> d = digester(['md5', 'sha1'])
174 >>> d = digester(['md5', 'sha1'])
175 >>> d.update('foo')
175 >>> d.update('foo')
176 >>> [k for k in sorted(d)]
176 >>> [k for k in sorted(d)]
177 ['md5', 'sha1']
177 ['md5', 'sha1']
178 >>> d['md5']
178 >>> d['md5']
179 'acbd18db4cc2f85cedef654fccc4a4d8'
179 'acbd18db4cc2f85cedef654fccc4a4d8'
180 >>> d['sha1']
180 >>> d['sha1']
181 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
181 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
182 >>> digester.preferred(['md5', 'sha1'])
182 >>> digester.preferred(['md5', 'sha1'])
183 'sha1'
183 'sha1'
184 """
184 """
185
185
186 def __init__(self, digests, s=''):
186 def __init__(self, digests, s=''):
187 self._hashes = {}
187 self._hashes = {}
188 for k in digests:
188 for k in digests:
189 if k not in DIGESTS:
189 if k not in DIGESTS:
190 raise Abort(_('unknown digest type: %s') % k)
190 raise Abort(_('unknown digest type: %s') % k)
191 self._hashes[k] = DIGESTS[k]()
191 self._hashes[k] = DIGESTS[k]()
192 if s:
192 if s:
193 self.update(s)
193 self.update(s)
194
194
195 def update(self, data):
195 def update(self, data):
196 for h in self._hashes.values():
196 for h in self._hashes.values():
197 h.update(data)
197 h.update(data)
198
198
199 def __getitem__(self, key):
199 def __getitem__(self, key):
200 if key not in DIGESTS:
200 if key not in DIGESTS:
201 raise Abort(_('unknown digest type: %s') % k)
201 raise Abort(_('unknown digest type: %s') % k)
202 return self._hashes[key].hexdigest()
202 return self._hashes[key].hexdigest()
203
203
204 def __iter__(self):
204 def __iter__(self):
205 return iter(self._hashes)
205 return iter(self._hashes)
206
206
207 @staticmethod
207 @staticmethod
208 def preferred(supported):
208 def preferred(supported):
209 """returns the strongest digest type in both supported and DIGESTS."""
209 """returns the strongest digest type in both supported and DIGESTS."""
210
210
211 for k in DIGESTS_BY_STRENGTH:
211 for k in DIGESTS_BY_STRENGTH:
212 if k in supported:
212 if k in supported:
213 return k
213 return k
214 return None
214 return None
215
215
216 class digestchecker(object):
216 class digestchecker(object):
217 """file handle wrapper that additionally checks content against a given
217 """file handle wrapper that additionally checks content against a given
218 size and digests.
218 size and digests.
219
219
220 d = digestchecker(fh, size, {'md5': '...'})
220 d = digestchecker(fh, size, {'md5': '...'})
221
221
222 When multiple digests are given, all of them are validated.
222 When multiple digests are given, all of them are validated.
223 """
223 """
224
224
225 def __init__(self, fh, size, digests):
225 def __init__(self, fh, size, digests):
226 self._fh = fh
226 self._fh = fh
227 self._size = size
227 self._size = size
228 self._got = 0
228 self._got = 0
229 self._digests = dict(digests)
229 self._digests = dict(digests)
230 self._digester = digester(self._digests.keys())
230 self._digester = digester(self._digests.keys())
231
231
232 def read(self, length=-1):
232 def read(self, length=-1):
233 content = self._fh.read(length)
233 content = self._fh.read(length)
234 self._digester.update(content)
234 self._digester.update(content)
235 self._got += len(content)
235 self._got += len(content)
236 return content
236 return content
237
237
238 def validate(self):
238 def validate(self):
239 if self._size != self._got:
239 if self._size != self._got:
240 raise Abort(_('size mismatch: expected %d, got %d') %
240 raise Abort(_('size mismatch: expected %d, got %d') %
241 (self._size, self._got))
241 (self._size, self._got))
242 for k, v in self._digests.items():
242 for k, v in self._digests.items():
243 if v != self._digester[k]:
243 if v != self._digester[k]:
244 # i18n: first parameter is a digest name
244 # i18n: first parameter is a digest name
245 raise Abort(_('%s mismatch: expected %s, got %s') %
245 raise Abort(_('%s mismatch: expected %s, got %s') %
246 (k, v, self._digester[k]))
246 (k, v, self._digester[k]))
247
247
248 try:
248 try:
249 buffer = buffer
249 buffer = buffer
250 except NameError:
250 except NameError:
251 if not pycompat.ispy3:
251 if not pycompat.ispy3:
252 def buffer(sliceable, offset=0, length=None):
252 def buffer(sliceable, offset=0, length=None):
253 if length is not None:
253 if length is not None:
254 return sliceable[offset:offset + length]
254 return sliceable[offset:offset + length]
255 return sliceable[offset:]
255 return sliceable[offset:]
256 else:
256 else:
257 def buffer(sliceable, offset=0, length=None):
257 def buffer(sliceable, offset=0, length=None):
258 if length is not None:
258 if length is not None:
259 return memoryview(sliceable)[offset:offset + length]
259 return memoryview(sliceable)[offset:offset + length]
260 return memoryview(sliceable)[offset:]
260 return memoryview(sliceable)[offset:]
261
261
262 closefds = pycompat.osname == 'posix'
262 closefds = pycompat.osname == 'posix'
263
263
264 _chunksize = 4096
264 _chunksize = 4096
265
265
266 class bufferedinputpipe(object):
266 class bufferedinputpipe(object):
267 """a manually buffered input pipe
267 """a manually buffered input pipe
268
268
269 Python will not let us use buffered IO and lazy reading with 'polling' at
269 Python will not let us use buffered IO and lazy reading with 'polling' at
270 the same time. We cannot probe the buffer state and select will not detect
270 the same time. We cannot probe the buffer state and select will not detect
271 that data are ready to read if they are already buffered.
271 that data are ready to read if they are already buffered.
272
272
273 This class let us work around that by implementing its own buffering
273 This class let us work around that by implementing its own buffering
274 (allowing efficient readline) while offering a way to know if the buffer is
274 (allowing efficient readline) while offering a way to know if the buffer is
275 empty from the output (allowing collaboration of the buffer with polling).
275 empty from the output (allowing collaboration of the buffer with polling).
276
276
277 This class lives in the 'util' module because it makes use of the 'os'
277 This class lives in the 'util' module because it makes use of the 'os'
278 module from the python stdlib.
278 module from the python stdlib.
279 """
279 """
280
280
281 def __init__(self, input):
281 def __init__(self, input):
282 self._input = input
282 self._input = input
283 self._buffer = []
283 self._buffer = []
284 self._eof = False
284 self._eof = False
285 self._lenbuf = 0
285 self._lenbuf = 0
286
286
287 @property
287 @property
288 def hasbuffer(self):
288 def hasbuffer(self):
289 """True is any data is currently buffered
289 """True is any data is currently buffered
290
290
291 This will be used externally a pre-step for polling IO. If there is
291 This will be used externally a pre-step for polling IO. If there is
292 already data then no polling should be set in place."""
292 already data then no polling should be set in place."""
293 return bool(self._buffer)
293 return bool(self._buffer)
294
294
295 @property
295 @property
296 def closed(self):
296 def closed(self):
297 return self._input.closed
297 return self._input.closed
298
298
299 def fileno(self):
299 def fileno(self):
300 return self._input.fileno()
300 return self._input.fileno()
301
301
302 def close(self):
302 def close(self):
303 return self._input.close()
303 return self._input.close()
304
304
305 def read(self, size):
305 def read(self, size):
306 while (not self._eof) and (self._lenbuf < size):
306 while (not self._eof) and (self._lenbuf < size):
307 self._fillbuffer()
307 self._fillbuffer()
308 return self._frombuffer(size)
308 return self._frombuffer(size)
309
309
310 def readline(self, *args, **kwargs):
310 def readline(self, *args, **kwargs):
311 if 1 < len(self._buffer):
311 if 1 < len(self._buffer):
312 # this should not happen because both read and readline end with a
312 # this should not happen because both read and readline end with a
313 # _frombuffer call that collapse it.
313 # _frombuffer call that collapse it.
314 self._buffer = [''.join(self._buffer)]
314 self._buffer = [''.join(self._buffer)]
315 self._lenbuf = len(self._buffer[0])
315 self._lenbuf = len(self._buffer[0])
316 lfi = -1
316 lfi = -1
317 if self._buffer:
317 if self._buffer:
318 lfi = self._buffer[-1].find('\n')
318 lfi = self._buffer[-1].find('\n')
319 while (not self._eof) and lfi < 0:
319 while (not self._eof) and lfi < 0:
320 self._fillbuffer()
320 self._fillbuffer()
321 if self._buffer:
321 if self._buffer:
322 lfi = self._buffer[-1].find('\n')
322 lfi = self._buffer[-1].find('\n')
323 size = lfi + 1
323 size = lfi + 1
324 if lfi < 0: # end of file
324 if lfi < 0: # end of file
325 size = self._lenbuf
325 size = self._lenbuf
326 elif 1 < len(self._buffer):
326 elif 1 < len(self._buffer):
327 # we need to take previous chunks into account
327 # we need to take previous chunks into account
328 size += self._lenbuf - len(self._buffer[-1])
328 size += self._lenbuf - len(self._buffer[-1])
329 return self._frombuffer(size)
329 return self._frombuffer(size)
330
330
331 def _frombuffer(self, size):
331 def _frombuffer(self, size):
332 """return at most 'size' data from the buffer
332 """return at most 'size' data from the buffer
333
333
334 The data are removed from the buffer."""
334 The data are removed from the buffer."""
335 if size == 0 or not self._buffer:
335 if size == 0 or not self._buffer:
336 return ''
336 return ''
337 buf = self._buffer[0]
337 buf = self._buffer[0]
338 if 1 < len(self._buffer):
338 if 1 < len(self._buffer):
339 buf = ''.join(self._buffer)
339 buf = ''.join(self._buffer)
340
340
341 data = buf[:size]
341 data = buf[:size]
342 buf = buf[len(data):]
342 buf = buf[len(data):]
343 if buf:
343 if buf:
344 self._buffer = [buf]
344 self._buffer = [buf]
345 self._lenbuf = len(buf)
345 self._lenbuf = len(buf)
346 else:
346 else:
347 self._buffer = []
347 self._buffer = []
348 self._lenbuf = 0
348 self._lenbuf = 0
349 return data
349 return data
350
350
351 def _fillbuffer(self):
351 def _fillbuffer(self):
352 """read data to the buffer"""
352 """read data to the buffer"""
353 data = os.read(self._input.fileno(), _chunksize)
353 data = os.read(self._input.fileno(), _chunksize)
354 if not data:
354 if not data:
355 self._eof = True
355 self._eof = True
356 else:
356 else:
357 self._lenbuf += len(data)
357 self._lenbuf += len(data)
358 self._buffer.append(data)
358 self._buffer.append(data)
359
359
360 def popen2(cmd, env=None, newlines=False):
360 def popen2(cmd, env=None, newlines=False):
361 # Setting bufsize to -1 lets the system decide the buffer size.
361 # Setting bufsize to -1 lets the system decide the buffer size.
362 # The default for bufsize is 0, meaning unbuffered. This leads to
362 # The default for bufsize is 0, meaning unbuffered. This leads to
363 # poor performance on Mac OS X: http://bugs.python.org/issue4194
363 # poor performance on Mac OS X: http://bugs.python.org/issue4194
364 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
364 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
365 close_fds=closefds,
365 close_fds=closefds,
366 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
366 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
367 universal_newlines=newlines,
367 universal_newlines=newlines,
368 env=env)
368 env=env)
369 return p.stdin, p.stdout
369 return p.stdin, p.stdout
370
370
371 def popen3(cmd, env=None, newlines=False):
371 def popen3(cmd, env=None, newlines=False):
372 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
372 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
373 return stdin, stdout, stderr
373 return stdin, stdout, stderr
374
374
375 def popen4(cmd, env=None, newlines=False, bufsize=-1):
375 def popen4(cmd, env=None, newlines=False, bufsize=-1):
376 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
376 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
377 close_fds=closefds,
377 close_fds=closefds,
378 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
378 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
379 stderr=subprocess.PIPE,
379 stderr=subprocess.PIPE,
380 universal_newlines=newlines,
380 universal_newlines=newlines,
381 env=env)
381 env=env)
382 return p.stdin, p.stdout, p.stderr, p
382 return p.stdin, p.stdout, p.stderr, p
383
383
384 def version():
384 def version():
385 """Return version information if available."""
385 """Return version information if available."""
386 try:
386 try:
387 from . import __version__
387 from . import __version__
388 return __version__.version
388 return __version__.version
389 except ImportError:
389 except ImportError:
390 return 'unknown'
390 return 'unknown'
391
391
392 def versiontuple(v=None, n=4):
392 def versiontuple(v=None, n=4):
393 """Parses a Mercurial version string into an N-tuple.
393 """Parses a Mercurial version string into an N-tuple.
394
394
395 The version string to be parsed is specified with the ``v`` argument.
395 The version string to be parsed is specified with the ``v`` argument.
396 If it isn't defined, the current Mercurial version string will be parsed.
396 If it isn't defined, the current Mercurial version string will be parsed.
397
397
398 ``n`` can be 2, 3, or 4. Here is how some version strings map to
398 ``n`` can be 2, 3, or 4. Here is how some version strings map to
399 returned values:
399 returned values:
400
400
401 >>> v = '3.6.1+190-df9b73d2d444'
401 >>> v = '3.6.1+190-df9b73d2d444'
402 >>> versiontuple(v, 2)
402 >>> versiontuple(v, 2)
403 (3, 6)
403 (3, 6)
404 >>> versiontuple(v, 3)
404 >>> versiontuple(v, 3)
405 (3, 6, 1)
405 (3, 6, 1)
406 >>> versiontuple(v, 4)
406 >>> versiontuple(v, 4)
407 (3, 6, 1, '190-df9b73d2d444')
407 (3, 6, 1, '190-df9b73d2d444')
408
408
409 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
409 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
410 (3, 6, 1, '190-df9b73d2d444+20151118')
410 (3, 6, 1, '190-df9b73d2d444+20151118')
411
411
412 >>> v = '3.6'
412 >>> v = '3.6'
413 >>> versiontuple(v, 2)
413 >>> versiontuple(v, 2)
414 (3, 6)
414 (3, 6)
415 >>> versiontuple(v, 3)
415 >>> versiontuple(v, 3)
416 (3, 6, None)
416 (3, 6, None)
417 >>> versiontuple(v, 4)
417 >>> versiontuple(v, 4)
418 (3, 6, None, None)
418 (3, 6, None, None)
419
419
420 >>> v = '3.9-rc'
420 >>> v = '3.9-rc'
421 >>> versiontuple(v, 2)
421 >>> versiontuple(v, 2)
422 (3, 9)
422 (3, 9)
423 >>> versiontuple(v, 3)
423 >>> versiontuple(v, 3)
424 (3, 9, None)
424 (3, 9, None)
425 >>> versiontuple(v, 4)
425 >>> versiontuple(v, 4)
426 (3, 9, None, 'rc')
426 (3, 9, None, 'rc')
427
427
428 >>> v = '3.9-rc+2-02a8fea4289b'
428 >>> v = '3.9-rc+2-02a8fea4289b'
429 >>> versiontuple(v, 2)
429 >>> versiontuple(v, 2)
430 (3, 9)
430 (3, 9)
431 >>> versiontuple(v, 3)
431 >>> versiontuple(v, 3)
432 (3, 9, None)
432 (3, 9, None)
433 >>> versiontuple(v, 4)
433 >>> versiontuple(v, 4)
434 (3, 9, None, 'rc+2-02a8fea4289b')
434 (3, 9, None, 'rc+2-02a8fea4289b')
435 """
435 """
436 if not v:
436 if not v:
437 v = version()
437 v = version()
438 parts = remod.split('[\+-]', v, 1)
438 parts = remod.split('[\+-]', v, 1)
439 if len(parts) == 1:
439 if len(parts) == 1:
440 vparts, extra = parts[0], None
440 vparts, extra = parts[0], None
441 else:
441 else:
442 vparts, extra = parts
442 vparts, extra = parts
443
443
444 vints = []
444 vints = []
445 for i in vparts.split('.'):
445 for i in vparts.split('.'):
446 try:
446 try:
447 vints.append(int(i))
447 vints.append(int(i))
448 except ValueError:
448 except ValueError:
449 break
449 break
450 # (3, 6) -> (3, 6, None)
450 # (3, 6) -> (3, 6, None)
451 while len(vints) < 3:
451 while len(vints) < 3:
452 vints.append(None)
452 vints.append(None)
453
453
454 if n == 2:
454 if n == 2:
455 return (vints[0], vints[1])
455 return (vints[0], vints[1])
456 if n == 3:
456 if n == 3:
457 return (vints[0], vints[1], vints[2])
457 return (vints[0], vints[1], vints[2])
458 if n == 4:
458 if n == 4:
459 return (vints[0], vints[1], vints[2], extra)
459 return (vints[0], vints[1], vints[2], extra)
460
460
461 # used by parsedate
461 # used by parsedate
462 defaultdateformats = (
462 defaultdateformats = (
463 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
463 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
464 '%Y-%m-%dT%H:%M', # without seconds
464 '%Y-%m-%dT%H:%M', # without seconds
465 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
465 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
466 '%Y-%m-%dT%H%M', # without seconds
466 '%Y-%m-%dT%H%M', # without seconds
467 '%Y-%m-%d %H:%M:%S', # our common legal variant
467 '%Y-%m-%d %H:%M:%S', # our common legal variant
468 '%Y-%m-%d %H:%M', # without seconds
468 '%Y-%m-%d %H:%M', # without seconds
469 '%Y-%m-%d %H%M%S', # without :
469 '%Y-%m-%d %H%M%S', # without :
470 '%Y-%m-%d %H%M', # without seconds
470 '%Y-%m-%d %H%M', # without seconds
471 '%Y-%m-%d %I:%M:%S%p',
471 '%Y-%m-%d %I:%M:%S%p',
472 '%Y-%m-%d %H:%M',
472 '%Y-%m-%d %H:%M',
473 '%Y-%m-%d %I:%M%p',
473 '%Y-%m-%d %I:%M%p',
474 '%Y-%m-%d',
474 '%Y-%m-%d',
475 '%m-%d',
475 '%m-%d',
476 '%m/%d',
476 '%m/%d',
477 '%m/%d/%y',
477 '%m/%d/%y',
478 '%m/%d/%Y',
478 '%m/%d/%Y',
479 '%a %b %d %H:%M:%S %Y',
479 '%a %b %d %H:%M:%S %Y',
480 '%a %b %d %I:%M:%S%p %Y',
480 '%a %b %d %I:%M:%S%p %Y',
481 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
481 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
482 '%b %d %H:%M:%S %Y',
482 '%b %d %H:%M:%S %Y',
483 '%b %d %I:%M:%S%p %Y',
483 '%b %d %I:%M:%S%p %Y',
484 '%b %d %H:%M:%S',
484 '%b %d %H:%M:%S',
485 '%b %d %I:%M:%S%p',
485 '%b %d %I:%M:%S%p',
486 '%b %d %H:%M',
486 '%b %d %H:%M',
487 '%b %d %I:%M%p',
487 '%b %d %I:%M%p',
488 '%b %d %Y',
488 '%b %d %Y',
489 '%b %d',
489 '%b %d',
490 '%H:%M:%S',
490 '%H:%M:%S',
491 '%I:%M:%S%p',
491 '%I:%M:%S%p',
492 '%H:%M',
492 '%H:%M',
493 '%I:%M%p',
493 '%I:%M%p',
494 )
494 )
495
495
496 extendeddateformats = defaultdateformats + (
496 extendeddateformats = defaultdateformats + (
497 "%Y",
497 "%Y",
498 "%Y-%m",
498 "%Y-%m",
499 "%b",
499 "%b",
500 "%b %Y",
500 "%b %Y",
501 )
501 )
502
502
503 def cachefunc(func):
503 def cachefunc(func):
504 '''cache the result of function calls'''
504 '''cache the result of function calls'''
505 # XXX doesn't handle keywords args
505 # XXX doesn't handle keywords args
506 if func.__code__.co_argcount == 0:
506 if func.__code__.co_argcount == 0:
507 cache = []
507 cache = []
508 def f():
508 def f():
509 if len(cache) == 0:
509 if len(cache) == 0:
510 cache.append(func())
510 cache.append(func())
511 return cache[0]
511 return cache[0]
512 return f
512 return f
513 cache = {}
513 cache = {}
514 if func.__code__.co_argcount == 1:
514 if func.__code__.co_argcount == 1:
515 # we gain a small amount of time because
515 # we gain a small amount of time because
516 # we don't need to pack/unpack the list
516 # we don't need to pack/unpack the list
517 def f(arg):
517 def f(arg):
518 if arg not in cache:
518 if arg not in cache:
519 cache[arg] = func(arg)
519 cache[arg] = func(arg)
520 return cache[arg]
520 return cache[arg]
521 else:
521 else:
522 def f(*args):
522 def f(*args):
523 if args not in cache:
523 if args not in cache:
524 cache[args] = func(*args)
524 cache[args] = func(*args)
525 return cache[args]
525 return cache[args]
526
526
527 return f
527 return f
528
528
529 class sortdict(dict):
529 class sortdict(dict):
530 '''a simple sorted dictionary'''
530 '''a simple sorted dictionary'''
531 def __init__(self, data=None):
531 def __init__(self, data=None):
532 self._list = []
532 self._list = []
533 if data:
533 if data:
534 self.update(data)
534 self.update(data)
535 def copy(self):
535 def copy(self):
536 return sortdict(self)
536 return sortdict(self)
537 def __setitem__(self, key, val):
537 def __setitem__(self, key, val):
538 if key in self:
538 if key in self:
539 self._list.remove(key)
539 self._list.remove(key)
540 self._list.append(key)
540 self._list.append(key)
541 dict.__setitem__(self, key, val)
541 dict.__setitem__(self, key, val)
542 def __iter__(self):
542 def __iter__(self):
543 return self._list.__iter__()
543 return self._list.__iter__()
544 def update(self, src):
544 def update(self, src):
545 if isinstance(src, dict):
545 if isinstance(src, dict):
546 src = src.iteritems()
546 src = src.iteritems()
547 for k, v in src:
547 for k, v in src:
548 self[k] = v
548 self[k] = v
549 def clear(self):
549 def clear(self):
550 dict.clear(self)
550 dict.clear(self)
551 self._list = []
551 self._list = []
552 def items(self):
552 def items(self):
553 return [(k, self[k]) for k in self._list]
553 return [(k, self[k]) for k in self._list]
554 def __delitem__(self, key):
554 def __delitem__(self, key):
555 dict.__delitem__(self, key)
555 dict.__delitem__(self, key)
556 self._list.remove(key)
556 self._list.remove(key)
557 def pop(self, key, *args, **kwargs):
557 def pop(self, key, *args, **kwargs):
558 dict.pop(self, key, *args, **kwargs)
558 dict.pop(self, key, *args, **kwargs)
559 try:
559 try:
560 self._list.remove(key)
560 self._list.remove(key)
561 except ValueError:
561 except ValueError:
562 pass
562 pass
563 def keys(self):
563 def keys(self):
564 return self._list[:]
564 return self._list[:]
565 def iterkeys(self):
565 def iterkeys(self):
566 return self._list.__iter__()
566 return self._list.__iter__()
567 def iteritems(self):
567 def iteritems(self):
568 for k in self._list:
568 for k in self._list:
569 yield k, self[k]
569 yield k, self[k]
570 def insert(self, index, key, val):
570 def insert(self, index, key, val):
571 self._list.insert(index, key)
571 self._list.insert(index, key)
572 dict.__setitem__(self, key, val)
572 dict.__setitem__(self, key, val)
573 def __repr__(self):
573 def __repr__(self):
574 if not self:
574 if not self:
575 return '%s()' % self.__class__.__name__
575 return '%s()' % self.__class__.__name__
576 return '%s(%r)' % (self.__class__.__name__, self.items())
576 return '%s(%r)' % (self.__class__.__name__, self.items())
577
577
578 class _lrucachenode(object):
578 class _lrucachenode(object):
579 """A node in a doubly linked list.
579 """A node in a doubly linked list.
580
580
581 Holds a reference to nodes on either side as well as a key-value
581 Holds a reference to nodes on either side as well as a key-value
582 pair for the dictionary entry.
582 pair for the dictionary entry.
583 """
583 """
584 __slots__ = (u'next', u'prev', u'key', u'value')
584 __slots__ = (u'next', u'prev', u'key', u'value')
585
585
586 def __init__(self):
586 def __init__(self):
587 self.next = None
587 self.next = None
588 self.prev = None
588 self.prev = None
589
589
590 self.key = _notset
590 self.key = _notset
591 self.value = None
591 self.value = None
592
592
593 def markempty(self):
593 def markempty(self):
594 """Mark the node as emptied."""
594 """Mark the node as emptied."""
595 self.key = _notset
595 self.key = _notset
596
596
597 class lrucachedict(object):
597 class lrucachedict(object):
598 """Dict that caches most recent accesses and sets.
598 """Dict that caches most recent accesses and sets.
599
599
600 The dict consists of an actual backing dict - indexed by original
600 The dict consists of an actual backing dict - indexed by original
601 key - and a doubly linked circular list defining the order of entries in
601 key - and a doubly linked circular list defining the order of entries in
602 the cache.
602 the cache.
603
603
604 The head node is the newest entry in the cache. If the cache is full,
604 The head node is the newest entry in the cache. If the cache is full,
605 we recycle head.prev and make it the new head. Cache accesses result in
605 we recycle head.prev and make it the new head. Cache accesses result in
606 the node being moved to before the existing head and being marked as the
606 the node being moved to before the existing head and being marked as the
607 new head node.
607 new head node.
608 """
608 """
609 def __init__(self, max):
609 def __init__(self, max):
610 self._cache = {}
610 self._cache = {}
611
611
612 self._head = head = _lrucachenode()
612 self._head = head = _lrucachenode()
613 head.prev = head
613 head.prev = head
614 head.next = head
614 head.next = head
615 self._size = 1
615 self._size = 1
616 self._capacity = max
616 self._capacity = max
617
617
618 def __len__(self):
618 def __len__(self):
619 return len(self._cache)
619 return len(self._cache)
620
620
621 def __contains__(self, k):
621 def __contains__(self, k):
622 return k in self._cache
622 return k in self._cache
623
623
624 def __iter__(self):
624 def __iter__(self):
625 # We don't have to iterate in cache order, but why not.
625 # We don't have to iterate in cache order, but why not.
626 n = self._head
626 n = self._head
627 for i in range(len(self._cache)):
627 for i in range(len(self._cache)):
628 yield n.key
628 yield n.key
629 n = n.next
629 n = n.next
630
630
631 def __getitem__(self, k):
631 def __getitem__(self, k):
632 node = self._cache[k]
632 node = self._cache[k]
633 self._movetohead(node)
633 self._movetohead(node)
634 return node.value
634 return node.value
635
635
636 def __setitem__(self, k, v):
636 def __setitem__(self, k, v):
637 node = self._cache.get(k)
637 node = self._cache.get(k)
638 # Replace existing value and mark as newest.
638 # Replace existing value and mark as newest.
639 if node is not None:
639 if node is not None:
640 node.value = v
640 node.value = v
641 self._movetohead(node)
641 self._movetohead(node)
642 return
642 return
643
643
644 if self._size < self._capacity:
644 if self._size < self._capacity:
645 node = self._addcapacity()
645 node = self._addcapacity()
646 else:
646 else:
647 # Grab the last/oldest item.
647 # Grab the last/oldest item.
648 node = self._head.prev
648 node = self._head.prev
649
649
650 # At capacity. Kill the old entry.
650 # At capacity. Kill the old entry.
651 if node.key is not _notset:
651 if node.key is not _notset:
652 del self._cache[node.key]
652 del self._cache[node.key]
653
653
654 node.key = k
654 node.key = k
655 node.value = v
655 node.value = v
656 self._cache[k] = node
656 self._cache[k] = node
657 # And mark it as newest entry. No need to adjust order since it
657 # And mark it as newest entry. No need to adjust order since it
658 # is already self._head.prev.
658 # is already self._head.prev.
659 self._head = node
659 self._head = node
660
660
661 def __delitem__(self, k):
661 def __delitem__(self, k):
662 node = self._cache.pop(k)
662 node = self._cache.pop(k)
663 node.markempty()
663 node.markempty()
664
664
665 # Temporarily mark as newest item before re-adjusting head to make
665 # Temporarily mark as newest item before re-adjusting head to make
666 # this node the oldest item.
666 # this node the oldest item.
667 self._movetohead(node)
667 self._movetohead(node)
668 self._head = node.next
668 self._head = node.next
669
669
670 # Additional dict methods.
670 # Additional dict methods.
671
671
672 def get(self, k, default=None):
672 def get(self, k, default=None):
673 try:
673 try:
674 return self._cache[k].value
674 return self._cache[k].value
675 except KeyError:
675 except KeyError:
676 return default
676 return default
677
677
678 def clear(self):
678 def clear(self):
679 n = self._head
679 n = self._head
680 while n.key is not _notset:
680 while n.key is not _notset:
681 n.markempty()
681 n.markempty()
682 n = n.next
682 n = n.next
683
683
684 self._cache.clear()
684 self._cache.clear()
685
685
686 def copy(self):
686 def copy(self):
687 result = lrucachedict(self._capacity)
687 result = lrucachedict(self._capacity)
688 n = self._head.prev
688 n = self._head.prev
689 # Iterate in oldest-to-newest order, so the copy has the right ordering
689 # Iterate in oldest-to-newest order, so the copy has the right ordering
690 for i in range(len(self._cache)):
690 for i in range(len(self._cache)):
691 result[n.key] = n.value
691 result[n.key] = n.value
692 n = n.prev
692 n = n.prev
693 return result
693 return result
694
694
695 def _movetohead(self, node):
695 def _movetohead(self, node):
696 """Mark a node as the newest, making it the new head.
696 """Mark a node as the newest, making it the new head.
697
697
698 When a node is accessed, it becomes the freshest entry in the LRU
698 When a node is accessed, it becomes the freshest entry in the LRU
699 list, which is denoted by self._head.
699 list, which is denoted by self._head.
700
700
701 Visually, let's make ``N`` the new head node (* denotes head):
701 Visually, let's make ``N`` the new head node (* denotes head):
702
702
703 previous/oldest <-> head <-> next/next newest
703 previous/oldest <-> head <-> next/next newest
704
704
705 ----<->--- A* ---<->-----
705 ----<->--- A* ---<->-----
706 | |
706 | |
707 E <-> D <-> N <-> C <-> B
707 E <-> D <-> N <-> C <-> B
708
708
709 To:
709 To:
710
710
711 ----<->--- N* ---<->-----
711 ----<->--- N* ---<->-----
712 | |
712 | |
713 E <-> D <-> C <-> B <-> A
713 E <-> D <-> C <-> B <-> A
714
714
715 This requires the following moves:
715 This requires the following moves:
716
716
717 C.next = D (node.prev.next = node.next)
717 C.next = D (node.prev.next = node.next)
718 D.prev = C (node.next.prev = node.prev)
718 D.prev = C (node.next.prev = node.prev)
719 E.next = N (head.prev.next = node)
719 E.next = N (head.prev.next = node)
720 N.prev = E (node.prev = head.prev)
720 N.prev = E (node.prev = head.prev)
721 N.next = A (node.next = head)
721 N.next = A (node.next = head)
722 A.prev = N (head.prev = node)
722 A.prev = N (head.prev = node)
723 """
723 """
724 head = self._head
724 head = self._head
725 # C.next = D
725 # C.next = D
726 node.prev.next = node.next
726 node.prev.next = node.next
727 # D.prev = C
727 # D.prev = C
728 node.next.prev = node.prev
728 node.next.prev = node.prev
729 # N.prev = E
729 # N.prev = E
730 node.prev = head.prev
730 node.prev = head.prev
731 # N.next = A
731 # N.next = A
732 # It is tempting to do just "head" here, however if node is
732 # It is tempting to do just "head" here, however if node is
733 # adjacent to head, this will do bad things.
733 # adjacent to head, this will do bad things.
734 node.next = head.prev.next
734 node.next = head.prev.next
735 # E.next = N
735 # E.next = N
736 node.next.prev = node
736 node.next.prev = node
737 # A.prev = N
737 # A.prev = N
738 node.prev.next = node
738 node.prev.next = node
739
739
740 self._head = node
740 self._head = node
741
741
742 def _addcapacity(self):
742 def _addcapacity(self):
743 """Add a node to the circular linked list.
743 """Add a node to the circular linked list.
744
744
745 The new node is inserted before the head node.
745 The new node is inserted before the head node.
746 """
746 """
747 head = self._head
747 head = self._head
748 node = _lrucachenode()
748 node = _lrucachenode()
749 head.prev.next = node
749 head.prev.next = node
750 node.prev = head.prev
750 node.prev = head.prev
751 node.next = head
751 node.next = head
752 head.prev = node
752 head.prev = node
753 self._size += 1
753 self._size += 1
754 return node
754 return node
755
755
756 def lrucachefunc(func):
756 def lrucachefunc(func):
757 '''cache most recent results of function calls'''
757 '''cache most recent results of function calls'''
758 cache = {}
758 cache = {}
759 order = collections.deque()
759 order = collections.deque()
760 if func.__code__.co_argcount == 1:
760 if func.__code__.co_argcount == 1:
761 def f(arg):
761 def f(arg):
762 if arg not in cache:
762 if arg not in cache:
763 if len(cache) > 20:
763 if len(cache) > 20:
764 del cache[order.popleft()]
764 del cache[order.popleft()]
765 cache[arg] = func(arg)
765 cache[arg] = func(arg)
766 else:
766 else:
767 order.remove(arg)
767 order.remove(arg)
768 order.append(arg)
768 order.append(arg)
769 return cache[arg]
769 return cache[arg]
770 else:
770 else:
771 def f(*args):
771 def f(*args):
772 if args not in cache:
772 if args not in cache:
773 if len(cache) > 20:
773 if len(cache) > 20:
774 del cache[order.popleft()]
774 del cache[order.popleft()]
775 cache[args] = func(*args)
775 cache[args] = func(*args)
776 else:
776 else:
777 order.remove(args)
777 order.remove(args)
778 order.append(args)
778 order.append(args)
779 return cache[args]
779 return cache[args]
780
780
781 return f
781 return f
782
782
783 class propertycache(object):
783 class propertycache(object):
784 def __init__(self, func):
784 def __init__(self, func):
785 self.func = func
785 self.func = func
786 self.name = func.__name__
786 self.name = func.__name__
787 def __get__(self, obj, type=None):
787 def __get__(self, obj, type=None):
788 result = self.func(obj)
788 result = self.func(obj)
789 self.cachevalue(obj, result)
789 self.cachevalue(obj, result)
790 return result
790 return result
791
791
792 def cachevalue(self, obj, value):
792 def cachevalue(self, obj, value):
793 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
793 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
794 obj.__dict__[self.name] = value
794 obj.__dict__[self.name] = value
795
795
796 def pipefilter(s, cmd):
796 def pipefilter(s, cmd):
797 '''filter string S through command CMD, returning its output'''
797 '''filter string S through command CMD, returning its output'''
798 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
798 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
799 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
799 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
800 pout, perr = p.communicate(s)
800 pout, perr = p.communicate(s)
801 return pout
801 return pout
802
802
803 def tempfilter(s, cmd):
803 def tempfilter(s, cmd):
804 '''filter string S through a pair of temporary files with CMD.
804 '''filter string S through a pair of temporary files with CMD.
805 CMD is used as a template to create the real command to be run,
805 CMD is used as a template to create the real command to be run,
806 with the strings INFILE and OUTFILE replaced by the real names of
806 with the strings INFILE and OUTFILE replaced by the real names of
807 the temporary files generated.'''
807 the temporary files generated.'''
808 inname, outname = None, None
808 inname, outname = None, None
809 try:
809 try:
810 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
810 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
811 fp = os.fdopen(infd, pycompat.sysstr('wb'))
811 fp = os.fdopen(infd, pycompat.sysstr('wb'))
812 fp.write(s)
812 fp.write(s)
813 fp.close()
813 fp.close()
814 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
814 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
815 os.close(outfd)
815 os.close(outfd)
816 cmd = cmd.replace('INFILE', inname)
816 cmd = cmd.replace('INFILE', inname)
817 cmd = cmd.replace('OUTFILE', outname)
817 cmd = cmd.replace('OUTFILE', outname)
818 code = os.system(cmd)
818 code = os.system(cmd)
819 if pycompat.sysplatform == 'OpenVMS' and code & 1:
819 if pycompat.sysplatform == 'OpenVMS' and code & 1:
820 code = 0
820 code = 0
821 if code:
821 if code:
822 raise Abort(_("command '%s' failed: %s") %
822 raise Abort(_("command '%s' failed: %s") %
823 (cmd, explainexit(code)))
823 (cmd, explainexit(code)))
824 return readfile(outname)
824 return readfile(outname)
825 finally:
825 finally:
826 try:
826 try:
827 if inname:
827 if inname:
828 os.unlink(inname)
828 os.unlink(inname)
829 except OSError:
829 except OSError:
830 pass
830 pass
831 try:
831 try:
832 if outname:
832 if outname:
833 os.unlink(outname)
833 os.unlink(outname)
834 except OSError:
834 except OSError:
835 pass
835 pass
836
836
837 filtertable = {
837 filtertable = {
838 'tempfile:': tempfilter,
838 'tempfile:': tempfilter,
839 'pipe:': pipefilter,
839 'pipe:': pipefilter,
840 }
840 }
841
841
842 def filter(s, cmd):
842 def filter(s, cmd):
843 "filter a string through a command that transforms its input to its output"
843 "filter a string through a command that transforms its input to its output"
844 for name, fn in filtertable.iteritems():
844 for name, fn in filtertable.iteritems():
845 if cmd.startswith(name):
845 if cmd.startswith(name):
846 return fn(s, cmd[len(name):].lstrip())
846 return fn(s, cmd[len(name):].lstrip())
847 return pipefilter(s, cmd)
847 return pipefilter(s, cmd)
848
848
849 def binary(s):
849 def binary(s):
850 """return true if a string is binary data"""
850 """return true if a string is binary data"""
851 return bool(s and '\0' in s)
851 return bool(s and '\0' in s)
852
852
853 def increasingchunks(source, min=1024, max=65536):
853 def increasingchunks(source, min=1024, max=65536):
854 '''return no less than min bytes per chunk while data remains,
854 '''return no less than min bytes per chunk while data remains,
855 doubling min after each chunk until it reaches max'''
855 doubling min after each chunk until it reaches max'''
856 def log2(x):
856 def log2(x):
857 if not x:
857 if not x:
858 return 0
858 return 0
859 i = 0
859 i = 0
860 while x:
860 while x:
861 x >>= 1
861 x >>= 1
862 i += 1
862 i += 1
863 return i - 1
863 return i - 1
864
864
865 buf = []
865 buf = []
866 blen = 0
866 blen = 0
867 for chunk in source:
867 for chunk in source:
868 buf.append(chunk)
868 buf.append(chunk)
869 blen += len(chunk)
869 blen += len(chunk)
870 if blen >= min:
870 if blen >= min:
871 if min < max:
871 if min < max:
872 min = min << 1
872 min = min << 1
873 nmin = 1 << log2(blen)
873 nmin = 1 << log2(blen)
874 if nmin > min:
874 if nmin > min:
875 min = nmin
875 min = nmin
876 if min > max:
876 if min > max:
877 min = max
877 min = max
878 yield ''.join(buf)
878 yield ''.join(buf)
879 blen = 0
879 blen = 0
880 buf = []
880 buf = []
881 if buf:
881 if buf:
882 yield ''.join(buf)
882 yield ''.join(buf)
883
883
884 Abort = error.Abort
884 Abort = error.Abort
885
885
886 def always(fn):
886 def always(fn):
887 return True
887 return True
888
888
889 def never(fn):
889 def never(fn):
890 return False
890 return False
891
891
892 def nogc(func):
892 def nogc(func):
893 """disable garbage collector
893 """disable garbage collector
894
894
895 Python's garbage collector triggers a GC each time a certain number of
895 Python's garbage collector triggers a GC each time a certain number of
896 container objects (the number being defined by gc.get_threshold()) are
896 container objects (the number being defined by gc.get_threshold()) are
897 allocated even when marked not to be tracked by the collector. Tracking has
897 allocated even when marked not to be tracked by the collector. Tracking has
898 no effect on when GCs are triggered, only on what objects the GC looks
898 no effect on when GCs are triggered, only on what objects the GC looks
899 into. As a workaround, disable GC while building complex (huge)
899 into. As a workaround, disable GC while building complex (huge)
900 containers.
900 containers.
901
901
902 This garbage collector issue have been fixed in 2.7.
902 This garbage collector issue have been fixed in 2.7.
903 """
903 """
904 if sys.version_info >= (2, 7):
904 if sys.version_info >= (2, 7):
905 return func
905 return func
906 def wrapper(*args, **kwargs):
906 def wrapper(*args, **kwargs):
907 gcenabled = gc.isenabled()
907 gcenabled = gc.isenabled()
908 gc.disable()
908 gc.disable()
909 try:
909 try:
910 return func(*args, **kwargs)
910 return func(*args, **kwargs)
911 finally:
911 finally:
912 if gcenabled:
912 if gcenabled:
913 gc.enable()
913 gc.enable()
914 return wrapper
914 return wrapper
915
915
916 def pathto(root, n1, n2):
916 def pathto(root, n1, n2):
917 '''return the relative path from one place to another.
917 '''return the relative path from one place to another.
918 root should use os.sep to separate directories
918 root should use os.sep to separate directories
919 n1 should use os.sep to separate directories
919 n1 should use os.sep to separate directories
920 n2 should use "/" to separate directories
920 n2 should use "/" to separate directories
921 returns an os.sep-separated path.
921 returns an os.sep-separated path.
922
922
923 If n1 is a relative path, it's assumed it's
923 If n1 is a relative path, it's assumed it's
924 relative to root.
924 relative to root.
925 n2 should always be relative to root.
925 n2 should always be relative to root.
926 '''
926 '''
927 if not n1:
927 if not n1:
928 return localpath(n2)
928 return localpath(n2)
929 if os.path.isabs(n1):
929 if os.path.isabs(n1):
930 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
930 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
931 return os.path.join(root, localpath(n2))
931 return os.path.join(root, localpath(n2))
932 n2 = '/'.join((pconvert(root), n2))
932 n2 = '/'.join((pconvert(root), n2))
933 a, b = splitpath(n1), n2.split('/')
933 a, b = splitpath(n1), n2.split('/')
934 a.reverse()
934 a.reverse()
935 b.reverse()
935 b.reverse()
936 while a and b and a[-1] == b[-1]:
936 while a and b and a[-1] == b[-1]:
937 a.pop()
937 a.pop()
938 b.pop()
938 b.pop()
939 b.reverse()
939 b.reverse()
940 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
940 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
941
941
942 def mainfrozen():
942 def mainfrozen():
943 """return True if we are a frozen executable.
943 """return True if we are a frozen executable.
944
944
945 The code supports py2exe (most common, Windows only) and tools/freeze
945 The code supports py2exe (most common, Windows only) and tools/freeze
946 (portable, not much used).
946 (portable, not much used).
947 """
947 """
948 return (safehasattr(sys, "frozen") or # new py2exe
948 return (safehasattr(sys, "frozen") or # new py2exe
949 safehasattr(sys, "importers") or # old py2exe
949 safehasattr(sys, "importers") or # old py2exe
950 imp.is_frozen(u"__main__")) # tools/freeze
950 imp.is_frozen(u"__main__")) # tools/freeze
951
951
952 # the location of data files matching the source code
952 # the location of data files matching the source code
953 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
953 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
954 # executable version (py2exe) doesn't support __file__
954 # executable version (py2exe) doesn't support __file__
955 datapath = os.path.dirname(pycompat.sysexecutable)
955 datapath = os.path.dirname(pycompat.sysexecutable)
956 else:
956 else:
957 datapath = os.path.dirname(pycompat.fsencode(__file__))
957 datapath = os.path.dirname(pycompat.fsencode(__file__))
958
958
959 i18n.setdatapath(datapath)
959 i18n.setdatapath(datapath)
960
960
961 _hgexecutable = None
961 _hgexecutable = None
962
962
963 def hgexecutable():
963 def hgexecutable():
964 """return location of the 'hg' executable.
964 """return location of the 'hg' executable.
965
965
966 Defaults to $HG or 'hg' in the search path.
966 Defaults to $HG or 'hg' in the search path.
967 """
967 """
968 if _hgexecutable is None:
968 if _hgexecutable is None:
969 hg = encoding.environ.get('HG')
969 hg = encoding.environ.get('HG')
970 mainmod = sys.modules[pycompat.sysstr('__main__')]
970 mainmod = sys.modules[pycompat.sysstr('__main__')]
971 if hg:
971 if hg:
972 _sethgexecutable(hg)
972 _sethgexecutable(hg)
973 elif mainfrozen():
973 elif mainfrozen():
974 if getattr(sys, 'frozen', None) == 'macosx_app':
974 if getattr(sys, 'frozen', None) == 'macosx_app':
975 # Env variable set by py2app
975 # Env variable set by py2app
976 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
976 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
977 else:
977 else:
978 _sethgexecutable(pycompat.sysexecutable)
978 _sethgexecutable(pycompat.sysexecutable)
979 elif (os.path.basename(
979 elif (os.path.basename(
980 pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
980 pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
981 _sethgexecutable(pycompat.fsencode(mainmod.__file__))
981 _sethgexecutable(pycompat.fsencode(mainmod.__file__))
982 else:
982 else:
983 exe = findexe('hg') or os.path.basename(sys.argv[0])
983 exe = findexe('hg') or os.path.basename(sys.argv[0])
984 _sethgexecutable(exe)
984 _sethgexecutable(exe)
985 return _hgexecutable
985 return _hgexecutable
986
986
987 def _sethgexecutable(path):
987 def _sethgexecutable(path):
988 """set location of the 'hg' executable"""
988 """set location of the 'hg' executable"""
989 global _hgexecutable
989 global _hgexecutable
990 _hgexecutable = path
990 _hgexecutable = path
991
991
992 def _isstdout(f):
992 def _isstdout(f):
993 fileno = getattr(f, 'fileno', None)
993 fileno = getattr(f, 'fileno', None)
994 return fileno and fileno() == sys.__stdout__.fileno()
994 return fileno and fileno() == sys.__stdout__.fileno()
995
995
996 def shellenviron(environ=None):
996 def shellenviron(environ=None):
997 """return environ with optional override, useful for shelling out"""
997 """return environ with optional override, useful for shelling out"""
998 def py2shell(val):
998 def py2shell(val):
999 'convert python object into string that is useful to shell'
999 'convert python object into string that is useful to shell'
1000 if val is None or val is False:
1000 if val is None or val is False:
1001 return '0'
1001 return '0'
1002 if val is True:
1002 if val is True:
1003 return '1'
1003 return '1'
1004 return str(val)
1004 return str(val)
1005 env = dict(encoding.environ)
1005 env = dict(encoding.environ)
1006 if environ:
1006 if environ:
1007 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1007 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1008 env['HG'] = hgexecutable()
1008 env['HG'] = hgexecutable()
1009 return env
1009 return env
1010
1010
1011 def system(cmd, environ=None, cwd=None, out=None):
1011 def system(cmd, environ=None, cwd=None, out=None):
1012 '''enhanced shell command execution.
1012 '''enhanced shell command execution.
1013 run with environment maybe modified, maybe in different dir.
1013 run with environment maybe modified, maybe in different dir.
1014
1014
1015 if out is specified, it is assumed to be a file-like object that has a
1015 if out is specified, it is assumed to be a file-like object that has a
1016 write() method. stdout and stderr will be redirected to out.'''
1016 write() method. stdout and stderr will be redirected to out.'''
1017 try:
1017 try:
1018 stdout.flush()
1018 stdout.flush()
1019 except Exception:
1019 except Exception:
1020 pass
1020 pass
1021 cmd = quotecommand(cmd)
1021 cmd = quotecommand(cmd)
1022 if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
1022 if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
1023 and sys.version_info[1] < 7):
1023 and sys.version_info[1] < 7):
1024 # subprocess kludge to work around issues in half-baked Python
1024 # subprocess kludge to work around issues in half-baked Python
1025 # ports, notably bichued/python:
1025 # ports, notably bichued/python:
1026 if not cwd is None:
1026 if not cwd is None:
1027 os.chdir(cwd)
1027 os.chdir(cwd)
1028 rc = os.system(cmd)
1028 rc = os.system(cmd)
1029 else:
1029 else:
1030 env = shellenviron(environ)
1030 env = shellenviron(environ)
1031 if out is None or _isstdout(out):
1031 if out is None or _isstdout(out):
1032 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1032 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1033 env=env, cwd=cwd)
1033 env=env, cwd=cwd)
1034 else:
1034 else:
1035 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1035 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1036 env=env, cwd=cwd, stdout=subprocess.PIPE,
1036 env=env, cwd=cwd, stdout=subprocess.PIPE,
1037 stderr=subprocess.STDOUT)
1037 stderr=subprocess.STDOUT)
1038 for line in iter(proc.stdout.readline, ''):
1038 for line in iter(proc.stdout.readline, ''):
1039 out.write(line)
1039 out.write(line)
1040 proc.wait()
1040 proc.wait()
1041 rc = proc.returncode
1041 rc = proc.returncode
1042 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1042 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1043 rc = 0
1043 rc = 0
1044 return rc
1044 return rc
1045
1045
1046 def checksignature(func):
1046 def checksignature(func):
1047 '''wrap a function with code to check for calling errors'''
1047 '''wrap a function with code to check for calling errors'''
1048 def check(*args, **kwargs):
1048 def check(*args, **kwargs):
1049 try:
1049 try:
1050 return func(*args, **kwargs)
1050 return func(*args, **kwargs)
1051 except TypeError:
1051 except TypeError:
1052 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1052 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1053 raise error.SignatureError
1053 raise error.SignatureError
1054 raise
1054 raise
1055
1055
1056 return check
1056 return check
1057
1057
1058 # a whilelist of known filesystems where hardlink works reliably
1058 # a whilelist of known filesystems where hardlink works reliably
1059 _hardlinkfswhitelist = set([
1059 _hardlinkfswhitelist = set([
1060 'btrfs',
1060 'btrfs',
1061 'ext2',
1061 'ext2',
1062 'ext3',
1062 'ext3',
1063 'ext4',
1063 'ext4',
1064 'hfs',
1064 'hfs',
1065 'jfs',
1065 'jfs',
1066 'reiserfs',
1066 'reiserfs',
1067 'tmpfs',
1067 'tmpfs',
1068 'ufs',
1068 'ufs',
1069 'xfs',
1069 'xfs',
1070 'zfs',
1070 'zfs',
1071 ])
1071 ])
1072
1072
1073 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1073 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1074 '''copy a file, preserving mode and optionally other stat info like
1074 '''copy a file, preserving mode and optionally other stat info like
1075 atime/mtime
1075 atime/mtime
1076
1076
1077 checkambig argument is used with filestat, and is useful only if
1077 checkambig argument is used with filestat, and is useful only if
1078 destination file is guarded by any lock (e.g. repo.lock or
1078 destination file is guarded by any lock (e.g. repo.lock or
1079 repo.wlock).
1079 repo.wlock).
1080
1080
1081 copystat and checkambig should be exclusive.
1081 copystat and checkambig should be exclusive.
1082 '''
1082 '''
1083 assert not (copystat and checkambig)
1083 assert not (copystat and checkambig)
1084 oldstat = None
1084 oldstat = None
1085 if os.path.lexists(dest):
1085 if os.path.lexists(dest):
1086 if checkambig:
1086 if checkambig:
1087 oldstat = checkambig and filestat(dest)
1087 oldstat = checkambig and filestat(dest)
1088 unlink(dest)
1088 unlink(dest)
1089 if hardlink:
1089 if hardlink:
1090 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1090 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1091 # unless we are confident that dest is on a whitelisted filesystem.
1091 # unless we are confident that dest is on a whitelisted filesystem.
1092 try:
1092 fstype = getfstype(os.path.dirname(dest))
1093 fstype = getfstype(os.path.dirname(dest))
1094 except OSError:
1095 fstype = None
1093 if fstype not in _hardlinkfswhitelist:
1096 if fstype not in _hardlinkfswhitelist:
1094 hardlink = False
1097 hardlink = False
1095 if hardlink:
1098 if hardlink:
1096 try:
1099 try:
1097 oslink(src, dest)
1100 oslink(src, dest)
1098 return
1101 return
1099 except (IOError, OSError):
1102 except (IOError, OSError):
1100 pass # fall back to normal copy
1103 pass # fall back to normal copy
1101 if os.path.islink(src):
1104 if os.path.islink(src):
1102 os.symlink(os.readlink(src), dest)
1105 os.symlink(os.readlink(src), dest)
1103 # copytime is ignored for symlinks, but in general copytime isn't needed
1106 # copytime is ignored for symlinks, but in general copytime isn't needed
1104 # for them anyway
1107 # for them anyway
1105 else:
1108 else:
1106 try:
1109 try:
1107 shutil.copyfile(src, dest)
1110 shutil.copyfile(src, dest)
1108 if copystat:
1111 if copystat:
1109 # copystat also copies mode
1112 # copystat also copies mode
1110 shutil.copystat(src, dest)
1113 shutil.copystat(src, dest)
1111 else:
1114 else:
1112 shutil.copymode(src, dest)
1115 shutil.copymode(src, dest)
1113 if oldstat and oldstat.stat:
1116 if oldstat and oldstat.stat:
1114 newstat = filestat(dest)
1117 newstat = filestat(dest)
1115 if newstat.isambig(oldstat):
1118 if newstat.isambig(oldstat):
1116 # stat of copied file is ambiguous to original one
1119 # stat of copied file is ambiguous to original one
1117 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1120 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1118 os.utime(dest, (advanced, advanced))
1121 os.utime(dest, (advanced, advanced))
1119 except shutil.Error as inst:
1122 except shutil.Error as inst:
1120 raise Abort(str(inst))
1123 raise Abort(str(inst))
1121
1124
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Recursively copy src to dst, hardlinking files when possible.

    If hardlink is None, it is decided automatically: links are only
    attempted when src and dst's parent are on the same device.

    Returns (hardlink, count) where hardlink reflects whether linking
    kept working and count is the number of files processed.  progress
    is called as progress(topic, position) while copying, and with
    None as position once this (sub)tree is done.
    """
    num = 0

    if hardlink is None:
        # hardlinks only make sense within a single filesystem
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    topic = _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset nested progress by files already counted here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. EXDEV, EPERM): copy from now on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)

    progress(topic, None)

    return hardlink, num
1158
1161
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for component in path.replace('\\', '/').split('/'):
        if not component:
            continue
        for ch in pycompat.bytestr(component):
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        stem = component.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        last = component[-1]
        # note: 'component not in ..' is a substring test; since
        # component is nonempty here, it excludes exactly '.' and '..'
        if last in '. ' and component not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1209
1212
# pick the platform-appropriate filename checker and wall clock
if pycompat.osname == 'nt':
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

if safehasattr(time, "perf_counter"):
    # prefer the high-resolution clock where the runtime provides one
    timer = time.perf_counter
1219
1222
def makelock(info, pathname):
    """Create a lock file at pathname holding info.

    A symlink whose target is info is preferred (atomic and readable
    without opening); when symlinks are unavailable, fall back to an
    exclusively-created regular file containing info.  EEXIST from the
    symlink attempt propagates so callers can detect contention.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as err:
        if err.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1232
1235
def readlock(pathname):
    """Return the info stored in the lock file at pathname.

    Reads the symlink target when the lock is a symlink; otherwise
    (symlinks unsupported, or the lock is a plain file) reads the file
    contents.
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: not a symlink; ENOSYS: platform lacks readlink
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    try:
        return fp.read()
    finally:
        fp.close()
1245
1248
def fstat(fp):
    """Return the stat result for a file object.

    Uses the object's file descriptor when it has a fileno() method;
    otherwise falls back to stat()ing it by name.
    """
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)
1252
1255
1253 # File system features
1256 # File system features
1254
1257
def fscasesensitive(path):
    """Return True if the given path is on a case-sensitive filesystem.

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component; when the final component has no case to fold,
    True is returned since there is no evidence either way.
    """
    st = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        return True # no evidence against case sensitivity
    try:
        # identical stat under the folded spelling => case-insensitive
        return os.lstat(os.path.join(dirname, folded)) != st
    except OSError:
        return True
1277
1280
try:
    import re2
    # None means "re2 imported but not yet verified to work"
    _re2 = None
except ImportError:
    _re2 = False

class _re(object):
    def _checkre2(self):
        # probe whether the re2 binding actually works and cache the
        # answer in the module-level _re2 flag
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # fall through to the stdlib engine on re2 syntax errors
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape

re = _re()
1328
1331
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dirpath):
        # map normcased entry name -> on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dirpath))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # (str.replace returns a new string; the original code discarded it,
    # leaving '\' unescaped inside the character classes below)
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    curdir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if curdir not in _fspathcache:
            _fspathcache[curdir] = _makefspathcacheentry(curdir)
        contents = _fspathcache[curdir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[curdir] = contents = _makefspathcacheentry(curdir)
            found = contents.get(part)

        # fall back to the requested spelling if the entry vanished
        result.append(found or part)
        curdir = os.path.join(curdir, part)

    return ''.join(result)
1371
1374
def getfstype(dirpath):
    '''Get the filesystem type name from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    # pure builds have no osutil.getfstype; degrade to "unknown"
    return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
1378
1381
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Probes by creating and hardlinking two scratch files next to
    testfile; both are removed before returning.
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        try:
            os.unlink(probe)
        except OSError:
            pass
        return False

    link = testfile + ".hgtmp2"
    fp = None
    try:
        oslink(probe, link)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(link)
        return nlinks(link) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        for f in (probe, link):
            try:
                os.unlink(f)
            except OSError:
                pass
1414
1417
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    altsep = pycompat.osaltsep
    return altsep and path.endswith(altsep)
1419
1422
def splitpath(path):
    '''Split path by os.sep.

    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(pycompat.ossep)
1427
1430
def gui():
    '''Are we running in a GUI?'''
    if pycompat.sysplatform != 'darwin':
        return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in encoding.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1442
1445
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, basename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % basename, dir=dirname)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            srcfp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original: the (empty) temp file is the copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        dstfp = posixfile(temp, "wb")
        for chunk in filechunkiter(srcfp):
            dstfp.write(chunk)
        srcfp.close()
        dstfp.close()
    except: # re-raises
        # don't leave a partial temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1481
1484
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # record a missing file as None rather than raising
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            if self.stat.st_size != old.stat.st_size:
                return False
            if self.stat.st_ctime != old.stat.st_ctime:
                return False
            return self.stat.st_mtime == old.stat.st_mtime
        except AttributeError:
            # at least one side has no stat (missing file or foreign
            # object): treated as not equal
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'.
        """
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno != errno.EPERM:
                raise
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges

    def __ne__(self, other):
        return not self == other
1565
1568
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # a second close() is a no-op: the underlying file is closed
        if self._fp.closed:
            return
        self._fp.close()
        filename = localpath(self.__name)
        oldstat = self._checkambig and filestat(filename)
        if oldstat and oldstat.stat:
            rename(self._tempname, filename)
            newstat = filestat(filename)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(filename, (advanced, advanced))
        else:
            rename(self._tempname, filename)

    def discard(self):
        # throw away all pending writes and the temporary file
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # publish on success, discard on error
        if exctype is not None:
            self.discard()
        else:
            self.close()
1628
1631
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
1640
1643
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as err:
        # a missing file is fine; anything else is a real error
        if err.errno != errno.ENOENT:
            raise
1648
1651
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create it first, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # Catch EEXIST to handle races
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1676
1679
def readfile(path):
    """Return the entire content of ``path`` as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1680
1683
def writefile(path, text):
    """Create or overwrite ``path`` with the bytes ``text``."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1684
1687
def appendfile(path, text):
    """Append the bytes ``text`` to ``path``, creating it if missing."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1688
1691
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # Re-chunk oversized inputs (> 1MB) into 256KB slices so no
            # single queued string is excessively large.
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # Chunks already pulled from the iterator but not yet consumed.
        self._queue = collections.deque()
        # Read offset into the chunk at the head of _queue.
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                # Buffer roughly 256KB ahead per refill.
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # Iterator exhausted: return what we have (short read).
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1769
1772
def filechunkiter(f, size=131072, limit=None):
    """Yield successive reads from file-like object ``f``.

    Reads ``size`` (default 131072) bytes at a time, stopping once
    ``limit`` bytes have been produced when a limit is given (default
    is to read everything). Chunks may be shorter than ``size`` at end
    of file, or for sockets and other streams that return short reads.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # Short-circuit: when nbytes is 0 (limit reached) skip the read
        # entirely and fall through to the break below.
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1790
1793
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # The local UTC offset is the difference between interpreting the
    # same instant as naive UTC and as naive local time.
    utcnaive = datetime.datetime.utcfromtimestamp(timestamp)
    localnaive = datetime.datetime.fromtimestamp(timestamp)
    delta = utcnaive - localnaive
    return timestamp, delta.days * 86400 + delta.seconds
1803
1806
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # %1 is hours and %2 minutes of the UTC offset; %z expands to
        # %1%2. A positive tz (seconds west of UTC) renders with '-'.
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # Clamp to the signed 32-bit range (matches the doctest extremes
    # above) before handing the value to datetime arithmetic.
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
    return s
1839
1842
def shortdate(date=None):
    """Render a (timestamp, tzoff) tuple as an ISO 8601 date (YYYY-MM-DD)."""
    # Delegate to datestr with a date-only format string.
    return datestr(date, format='%Y-%m-%d')
1843
1846
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""
    # Named aliases for UTC.
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style [+-]hhmm suffix.
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hh = int(s[-4:-2])
        mm = int(s[-2:])
        return -sign * (hh * 60 + mm) * 60, s[:-5].rstrip()

    # ISO 8601 trailing Z (but only after a digit, so a bare "Z"
    # or a word ending in Z is not consumed).
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO 8601-style [+-]hh:mm suffix.
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hh = int(s[-5:-3])
        mm = int(s[-2:])
        return -sign * (hh * 60 + mm) * 60, s[:-6]

    # No recognizable timezone suffix.
    return None, s
1871
1874
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    if defaults is None:
        defaults = {}

    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        # Each entry lists the format codes that can supply this field
        # (e.g. "HI" = %H or %I for hours, "mb" = %m or %b for months).
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # Append the default value (biased or today's) together with
            # a matching format code, separated by '@' as a safe delimiter.
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1902
1905
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # Already parsed; pass through unchanged.
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # Symbolic dates (including their translations) are resolved first.
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # Fast path: a raw "unixtime offset" pair of integers.
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # Try each candidate format until one parses.
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1979
1982
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # Bias unspecified month/day fields down (towards Jan 1) to get
        # the earliest timestamp the spec can mean.
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # Bias unspecified fields up (Dec 31, 23:59:59); try the longest
        # day-of-month first and back off until the date is valid.
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N" means "within the last N days".
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # Inclusive range "A to B".
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # Bare date: match the whole interval it denotes at its accuracy.
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
2055
2058
def stringmatcher(pattern, casesensitive=True):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
    >>> def itest(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])

    case insensitive regex matches
    >>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
    ('re', 'A.+b', [False, False, True])

    case insensitive literal matches
    >>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
    ('literal', 'ABCDEFG', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        flags = 0 if casesensitive else remod.I
        try:
            regex = remod.compile(pattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search

    # An explicit 'literal:' prefix is stripped; any other prefix is
    # simply part of the literal text.
    if pattern.startswith('literal:'):
        pattern = pattern[8:]

    if casesensitive:
        match = pattern.__eq__
    else:
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match
2114
2117
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # Strip, in order: the mail domain, a leading "Name <" part, then
    # anything after the first space or the first dot.
    idx = user.find('@')
    if idx >= 0:
        user = user[:idx]
    idx = user.find('<')
    if idx >= 0:
        user = user[idx + 1:]
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
2130
2133
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at >= 0:
        # Drop the domain.
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        # Drop a leading real-name "Name <" prefix.
        user = user[lt + 1:]
    return user
2140
2143
def email(author):
    '''get email of author.'''
    # Slice between '<' and '>'. With no '>', take through end of string
    # (r stays None); with no '<', find returns -1 and +1 starts at 0.
    end = author.find('>')
    if end == -1:
        end = None
    return author[author.find('<') + 1:end]
2147
2150
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # Delegates width measurement and truncation to encoding.trim, which
    # appends '...' when the text is shortened.
    return encoding.trim(text, maxlength, ellipsis='...')
2151
2154
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # Walk the table in order; the first (multiplier, divisor, format)
        # row whose threshold the count reaches wins. The final row acts
        # as the fallback for anything smaller.
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        return unittable[-1][2] % count

    return go
2162
2165
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
    ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
    ...
    ParseError: fromline must be strictly positive
    """
    if fromline > toline:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    # Convert the 1-based inclusive input to a 0-based half-open range.
    return fromline - 1, toline
2183
2186
# Render a byte count with a human-readable binary-unit suffix.  The
# (multiplier, divisor, format) rows are ordered largest-unit first and the
# thresholds are chosen so at most three significant digits are printed.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2196
2199
def escapestr(s):
    """Backslash-escape non-printable bytes in s.

    Calls the C helper underlying s.encode('string_escape') directly,
    because the 'string_escape' codec does not exist on Python 3.
    """
    escaped = codecs.escape_encode(s)
    # escape_encode returns (encoded_bytes, length_consumed)
    return escaped[0]
2201
2204
def unescapestr(s):
    """Reverse escapestr: decode backslash escapes in s to raw bytes."""
    decoded = codecs.escape_decode(s)
    # escape_decode returns (decoded_bytes, length_consumed)
    return decoded[0]
2204
2207
def uirepr(s):
    """repr() for user display: collapse doubled backslashes.

    repr() escapes each backslash, which makes Windows paths hard to
    read; undo that doubling before showing the value to the user.
    """
    raw = repr(s)
    return raw.replace('\\\\', '\\')
2208
2211
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory returning a width-aware TextWrapper instance; the class is
    # created lazily on first call and then cached by rebinding the
    # module-level MBTextWrapper name to it (see bottom of function).
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr so the first part fits in space_left display
            # columns (per encoding.ucolwidth), returning (head, rest).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            # whole string fits; nothing left over
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # A chunk too wide for any line: either break it at the
            # column boundary or, if breaking is disabled, emit it whole.
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class so subsequent calls skip the class body re-creation
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2312
2315
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap line to width display columns, width-awarely.

    initindent prefixes the first output line, hangindent all later
    lines.  Input/output are byte strings in the local encoding.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # decode everything once, using the configured encoding/error mode
    enc = pycompat.sysstr(encoding.encoding)
    mode = pycompat.sysstr(encoding.encodingmode)
    uline = line.decode(enc, mode)
    uinit = initindent.decode(enc, mode)
    uhang = hangindent.decode(enc, mode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(enc)
2328
2331
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #            | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    # emit every complete line; keep any trailing partial
                    # line in 'line' for the next read
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    # EOF: os.read returned empty
                    break
            if line:
                # trailing data with no final newline
                yield line

    def iterfile(fp):
        # Regular on-disk files don't hit the EINTR bug in practice, so
        # keep the fast built-in iteration path for them.
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2400
2403
def iterlines(iterator):
    """Yield each line of every chunk produced by iterator."""
    for chunk in iterator:
        # a chunk may hold zero, one, or many lines
        for line in chunk.splitlines():
            yield line
2405
2408
def expandpath(path):
    """Expand environment variables, then ~user, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2408
2411
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [encoding.environ['EXECUTABLEPATH']]
    return [pycompat.sysexecutable]
2423
2426
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its (pid, status) so the poll
        # loop below can notice the termination
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on Windows; install the handler only on Unix
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after detecting death to avoid a race
            # where the child succeeded just before exiting
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2458
2461
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        # a two-char prefix like r'\$' is regex-escaped; the bare second
        # char is what actually appears in the input text
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # a doubled prefix interpolates to a single literal prefix char
        mapping[prefix_char] = prefix_char
    pattern = r'%s(%s)' % (prefix, patterns)
    r = remod.compile(pattern)

    def replace(match):
        # strip the leading prefix char to get the mapping key
        return fn(mapping[match.group()[1:]])

    return r.sub(replace, s)
2483
2486
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not a plain number: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2500
2503
# recognized spellings of boolean configuration values
_booleans = {
    '1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
    '0': False, 'no': False, 'false': False, 'off': False, 'never': False,
}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # case-insensitive lookup; unrecognized spellings map to None
    return _booleans.get(s.lower())
2511
2514
2512 _hextochr = dict((a + b, chr(int(a + b, 16)))
2515 _hextochr = dict((a + b, chr(int(a + b, 16)))
2513 for a in string.hexdigits for b in string.hexdigits)
2516 for a in string.hexdigits for b in string.hexdigits)
2514
2517
2515 class url(object):
2518 class url(object):
2516 r"""Reliable URL parser.
2519 r"""Reliable URL parser.
2517
2520
2518 This parses URLs and provides attributes for the following
2521 This parses URLs and provides attributes for the following
2519 components:
2522 components:
2520
2523
2521 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2524 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2522
2525
2523 Missing components are set to None. The only exception is
2526 Missing components are set to None. The only exception is
2524 fragment, which is set to '' if present but empty.
2527 fragment, which is set to '' if present but empty.
2525
2528
2526 If parsefragment is False, fragment is included in query. If
2529 If parsefragment is False, fragment is included in query. If
2527 parsequery is False, query is included in path. If both are
2530 parsequery is False, query is included in path. If both are
2528 False, both fragment and query are included in path.
2531 False, both fragment and query are included in path.
2529
2532
2530 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2533 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2531
2534
2532 Note that for backward compatibility reasons, bundle URLs do not
2535 Note that for backward compatibility reasons, bundle URLs do not
2533 take host names. That means 'bundle://../' has a path of '../'.
2536 take host names. That means 'bundle://../' has a path of '../'.
2534
2537
2535 Examples:
2538 Examples:
2536
2539
2537 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2540 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2538 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2541 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2539 >>> url('ssh://[::1]:2200//home/joe/repo')
2542 >>> url('ssh://[::1]:2200//home/joe/repo')
2540 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2543 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2541 >>> url('file:///home/joe/repo')
2544 >>> url('file:///home/joe/repo')
2542 <url scheme: 'file', path: '/home/joe/repo'>
2545 <url scheme: 'file', path: '/home/joe/repo'>
2543 >>> url('file:///c:/temp/foo/')
2546 >>> url('file:///c:/temp/foo/')
2544 <url scheme: 'file', path: 'c:/temp/foo/'>
2547 <url scheme: 'file', path: 'c:/temp/foo/'>
2545 >>> url('bundle:foo')
2548 >>> url('bundle:foo')
2546 <url scheme: 'bundle', path: 'foo'>
2549 <url scheme: 'bundle', path: 'foo'>
2547 >>> url('bundle://../foo')
2550 >>> url('bundle://../foo')
2548 <url scheme: 'bundle', path: '../foo'>
2551 <url scheme: 'bundle', path: '../foo'>
2549 >>> url(r'c:\foo\bar')
2552 >>> url(r'c:\foo\bar')
2550 <url path: 'c:\\foo\\bar'>
2553 <url path: 'c:\\foo\\bar'>
2551 >>> url(r'\\blah\blah\blah')
2554 >>> url(r'\\blah\blah\blah')
2552 <url path: '\\\\blah\\blah\\blah'>
2555 <url path: '\\\\blah\\blah\\blah'>
2553 >>> url(r'\\blah\blah\blah#baz')
2556 >>> url(r'\\blah\blah\blah#baz')
2554 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2557 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2555 >>> url(r'file:///C:\users\me')
2558 >>> url(r'file:///C:\users\me')
2556 <url scheme: 'file', path: 'C:\\users\\me'>
2559 <url scheme: 'file', path: 'C:\\users\\me'>
2557
2560
2558 Authentication credentials:
2561 Authentication credentials:
2559
2562
2560 >>> url('ssh://joe:xyz@x/repo')
2563 >>> url('ssh://joe:xyz@x/repo')
2561 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2564 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2562 >>> url('ssh://joe@x/repo')
2565 >>> url('ssh://joe@x/repo')
2563 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2566 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2564
2567
2565 Query strings and fragments:
2568 Query strings and fragments:
2566
2569
2567 >>> url('http://host/a?b#c')
2570 >>> url('http://host/a?b#c')
2568 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2571 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2569 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2572 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2570 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2573 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2571
2574
2572 Empty path:
2575 Empty path:
2573
2576
2574 >>> url('')
2577 >>> url('')
2575 <url path: ''>
2578 <url path: ''>
2576 >>> url('#a')
2579 >>> url('#a')
2577 <url path: '', fragment: 'a'>
2580 <url path: '', fragment: 'a'>
2578 >>> url('http://host/')
2581 >>> url('http://host/')
2579 <url scheme: 'http', host: 'host', path: ''>
2582 <url scheme: 'http', host: 'host', path: ''>
2580 >>> url('http://host/#a')
2583 >>> url('http://host/#a')
2581 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2584 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2582
2585
2583 Only scheme:
2586 Only scheme:
2584
2587
2585 >>> url('http:')
2588 >>> url('http:')
2586 <url scheme: 'http'>
2589 <url scheme: 'http'>
2587 """
2590 """
2588
2591
2589 _safechars = "!~*'()+"
2592 _safechars = "!~*'()+"
2590 _safepchars = "/!~*'()+:\\"
2593 _safepchars = "/!~*'()+:\\"
2591 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2594 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2592
2595
2593 def __init__(self, path, parsequery=True, parsefragment=True):
2596 def __init__(self, path, parsequery=True, parsefragment=True):
2594 # We slowly chomp away at path until we have only the path left
2597 # We slowly chomp away at path until we have only the path left
2595 self.scheme = self.user = self.passwd = self.host = None
2598 self.scheme = self.user = self.passwd = self.host = None
2596 self.port = self.path = self.query = self.fragment = None
2599 self.port = self.path = self.query = self.fragment = None
2597 self._localpath = True
2600 self._localpath = True
2598 self._hostport = ''
2601 self._hostport = ''
2599 self._origpath = path
2602 self._origpath = path
2600
2603
2601 if parsefragment and '#' in path:
2604 if parsefragment and '#' in path:
2602 path, self.fragment = path.split('#', 1)
2605 path, self.fragment = path.split('#', 1)
2603
2606
2604 # special case for Windows drive letters and UNC paths
2607 # special case for Windows drive letters and UNC paths
2605 if hasdriveletter(path) or path.startswith('\\\\'):
2608 if hasdriveletter(path) or path.startswith('\\\\'):
2606 self.path = path
2609 self.path = path
2607 return
2610 return
2608
2611
2609 # For compatibility reasons, we can't handle bundle paths as
2612 # For compatibility reasons, we can't handle bundle paths as
2610 # normal URLS
2613 # normal URLS
2611 if path.startswith('bundle:'):
2614 if path.startswith('bundle:'):
2612 self.scheme = 'bundle'
2615 self.scheme = 'bundle'
2613 path = path[7:]
2616 path = path[7:]
2614 if path.startswith('//'):
2617 if path.startswith('//'):
2615 path = path[2:]
2618 path = path[2:]
2616 self.path = path
2619 self.path = path
2617 return
2620 return
2618
2621
2619 if self._matchscheme(path):
2622 if self._matchscheme(path):
2620 parts = path.split(':', 1)
2623 parts = path.split(':', 1)
2621 if parts[0]:
2624 if parts[0]:
2622 self.scheme, path = parts
2625 self.scheme, path = parts
2623 self._localpath = False
2626 self._localpath = False
2624
2627
2625 if not path:
2628 if not path:
2626 path = None
2629 path = None
2627 if self._localpath:
2630 if self._localpath:
2628 self.path = ''
2631 self.path = ''
2629 return
2632 return
2630 else:
2633 else:
2631 if self._localpath:
2634 if self._localpath:
2632 self.path = path
2635 self.path = path
2633 return
2636 return
2634
2637
2635 if parsequery and '?' in path:
2638 if parsequery and '?' in path:
2636 path, self.query = path.split('?', 1)
2639 path, self.query = path.split('?', 1)
2637 if not path:
2640 if not path:
2638 path = None
2641 path = None
2639 if not self.query:
2642 if not self.query:
2640 self.query = None
2643 self.query = None
2641
2644
2642 # // is required to specify a host/authority
2645 # // is required to specify a host/authority
2643 if path and path.startswith('//'):
2646 if path and path.startswith('//'):
2644 parts = path[2:].split('/', 1)
2647 parts = path[2:].split('/', 1)
2645 if len(parts) > 1:
2648 if len(parts) > 1:
2646 self.host, path = parts
2649 self.host, path = parts
2647 else:
2650 else:
2648 self.host = parts[0]
2651 self.host = parts[0]
2649 path = None
2652 path = None
2650 if not self.host:
2653 if not self.host:
2651 self.host = None
2654 self.host = None
2652 # path of file:///d is /d
2655 # path of file:///d is /d
2653 # path of file:///d:/ is d:/, not /d:/
2656 # path of file:///d:/ is d:/, not /d:/
2654 if path and not hasdriveletter(path):
2657 if path and not hasdriveletter(path):
2655 path = '/' + path
2658 path = '/' + path
2656
2659
2657 if self.host and '@' in self.host:
2660 if self.host and '@' in self.host:
2658 self.user, self.host = self.host.rsplit('@', 1)
2661 self.user, self.host = self.host.rsplit('@', 1)
2659 if ':' in self.user:
2662 if ':' in self.user:
2660 self.user, self.passwd = self.user.split(':', 1)
2663 self.user, self.passwd = self.user.split(':', 1)
2661 if not self.host:
2664 if not self.host:
2662 self.host = None
2665 self.host = None
2663
2666
2664 # Don't split on colons in IPv6 addresses without ports
2667 # Don't split on colons in IPv6 addresses without ports
2665 if (self.host and ':' in self.host and
2668 if (self.host and ':' in self.host and
2666 not (self.host.startswith('[') and self.host.endswith(']'))):
2669 not (self.host.startswith('[') and self.host.endswith(']'))):
2667 self._hostport = self.host
2670 self._hostport = self.host
2668 self.host, self.port = self.host.rsplit(':', 1)
2671 self.host, self.port = self.host.rsplit(':', 1)
2669 if not self.host:
2672 if not self.host:
2670 self.host = None
2673 self.host = None
2671
2674
2672 if (self.host and self.scheme == 'file' and
2675 if (self.host and self.scheme == 'file' and
2673 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2676 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2674 raise Abort(_('file:// URLs can only refer to localhost'))
2677 raise Abort(_('file:// URLs can only refer to localhost'))
2675
2678
2676 self.path = path
2679 self.path = path
2677
2680
2678 # leave the query string escaped
2681 # leave the query string escaped
2679 for a in ('user', 'passwd', 'host', 'port',
2682 for a in ('user', 'passwd', 'host', 'port',
2680 'path', 'fragment'):
2683 'path', 'fragment'):
2681 v = getattr(self, a)
2684 v = getattr(self, a)
2682 if v is not None:
2685 if v is not None:
2683 setattr(self, a, urlreq.unquote(v))
2686 setattr(self, a, urlreq.unquote(v))
2684
2687
2685 def __repr__(self):
2688 def __repr__(self):
2686 attrs = []
2689 attrs = []
2687 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2690 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2688 'query', 'fragment'):
2691 'query', 'fragment'):
2689 v = getattr(self, a)
2692 v = getattr(self, a)
2690 if v is not None:
2693 if v is not None:
2691 attrs.append('%s: %r' % (a, v))
2694 attrs.append('%s: %r' % (a, v))
2692 return '<url %s>' % ', '.join(attrs)
2695 return '<url %s>' % ', '.join(attrs)
2693
2696
2694 def __str__(self):
2697 def __str__(self):
2695 r"""Join the URL's components back into a URL string.
2698 r"""Join the URL's components back into a URL string.
2696
2699
2697 Examples:
2700 Examples:
2698
2701
2699 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2702 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2700 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2703 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2701 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2704 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2702 'http://user:pw@host:80/?foo=bar&baz=42'
2705 'http://user:pw@host:80/?foo=bar&baz=42'
2703 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2706 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2704 'http://user:pw@host:80/?foo=bar%3dbaz'
2707 'http://user:pw@host:80/?foo=bar%3dbaz'
2705 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2708 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2706 'ssh://user:pw@[::1]:2200//home/joe#'
2709 'ssh://user:pw@[::1]:2200//home/joe#'
2707 >>> str(url('http://localhost:80//'))
2710 >>> str(url('http://localhost:80//'))
2708 'http://localhost:80//'
2711 'http://localhost:80//'
2709 >>> str(url('http://localhost:80/'))
2712 >>> str(url('http://localhost:80/'))
2710 'http://localhost:80/'
2713 'http://localhost:80/'
2711 >>> str(url('http://localhost:80'))
2714 >>> str(url('http://localhost:80'))
2712 'http://localhost:80/'
2715 'http://localhost:80/'
2713 >>> str(url('bundle:foo'))
2716 >>> str(url('bundle:foo'))
2714 'bundle:foo'
2717 'bundle:foo'
2715 >>> str(url('bundle://../foo'))
2718 >>> str(url('bundle://../foo'))
2716 'bundle:../foo'
2719 'bundle:../foo'
2717 >>> str(url('path'))
2720 >>> str(url('path'))
2718 'path'
2721 'path'
2719 >>> str(url('file:///tmp/foo/bar'))
2722 >>> str(url('file:///tmp/foo/bar'))
2720 'file:///tmp/foo/bar'
2723 'file:///tmp/foo/bar'
2721 >>> str(url('file:///c:/tmp/foo/bar'))
2724 >>> str(url('file:///c:/tmp/foo/bar'))
2722 'file:///c:/tmp/foo/bar'
2725 'file:///c:/tmp/foo/bar'
2723 >>> print url(r'bundle:foo\bar')
2726 >>> print url(r'bundle:foo\bar')
2724 bundle:foo\bar
2727 bundle:foo\bar
2725 >>> print url(r'file:///D:\data\hg')
2728 >>> print url(r'file:///D:\data\hg')
2726 file:///D:\data\hg
2729 file:///D:\data\hg
2727 """
2730 """
2728 return encoding.strfromlocal(self.__bytes__())
2731 return encoding.strfromlocal(self.__bytes__())
2729
2732
2730 def __bytes__(self):
2733 def __bytes__(self):
2731 if self._localpath:
2734 if self._localpath:
2732 s = self.path
2735 s = self.path
2733 if self.scheme == 'bundle':
2736 if self.scheme == 'bundle':
2734 s = 'bundle:' + s
2737 s = 'bundle:' + s
2735 if self.fragment:
2738 if self.fragment:
2736 s += '#' + self.fragment
2739 s += '#' + self.fragment
2737 return s
2740 return s
2738
2741
2739 s = self.scheme + ':'
2742 s = self.scheme + ':'
2740 if self.user or self.passwd or self.host:
2743 if self.user or self.passwd or self.host:
2741 s += '//'
2744 s += '//'
2742 elif self.scheme and (not self.path or self.path.startswith('/')
2745 elif self.scheme and (not self.path or self.path.startswith('/')
2743 or hasdriveletter(self.path)):
2746 or hasdriveletter(self.path)):
2744 s += '//'
2747 s += '//'
2745 if hasdriveletter(self.path):
2748 if hasdriveletter(self.path):
2746 s += '/'
2749 s += '/'
2747 if self.user:
2750 if self.user:
2748 s += urlreq.quote(self.user, safe=self._safechars)
2751 s += urlreq.quote(self.user, safe=self._safechars)
2749 if self.passwd:
2752 if self.passwd:
2750 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2753 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2751 if self.user or self.passwd:
2754 if self.user or self.passwd:
2752 s += '@'
2755 s += '@'
2753 if self.host:
2756 if self.host:
2754 if not (self.host.startswith('[') and self.host.endswith(']')):
2757 if not (self.host.startswith('[') and self.host.endswith(']')):
2755 s += urlreq.quote(self.host)
2758 s += urlreq.quote(self.host)
2756 else:
2759 else:
2757 s += self.host
2760 s += self.host
2758 if self.port:
2761 if self.port:
2759 s += ':' + urlreq.quote(self.port)
2762 s += ':' + urlreq.quote(self.port)
2760 if self.host:
2763 if self.host:
2761 s += '/'
2764 s += '/'
2762 if self.path:
2765 if self.path:
2763 # TODO: similar to the query string, we should not unescape the
2766 # TODO: similar to the query string, we should not unescape the
2764 # path when we store it, the path might contain '%2f' = '/',
2767 # path when we store it, the path might contain '%2f' = '/',
2765 # which we should *not* escape.
2768 # which we should *not* escape.
2766 s += urlreq.quote(self.path, safe=self._safepchars)
2769 s += urlreq.quote(self.path, safe=self._safepchars)
2767 if self.query:
2770 if self.query:
2768 # we store the query in escaped form.
2771 # we store the query in escaped form.
2769 s += '?' + self.query
2772 s += '?' + self.query
2770 if self.fragment is not None:
2773 if self.fragment is not None:
2771 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2774 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2772 return s
2775 return s
2773
2776
2774 def authinfo(self):
2777 def authinfo(self):
2775 user, passwd = self.user, self.passwd
2778 user, passwd = self.user, self.passwd
2776 try:
2779 try:
2777 self.user, self.passwd = None, None
2780 self.user, self.passwd = None, None
2778 s = str(self)
2781 s = str(self)
2779 finally:
2782 finally:
2780 self.user, self.passwd = user, passwd
2783 self.user, self.passwd = user, passwd
2781 if not self.user:
2784 if not self.user:
2782 return (s, None)
2785 return (s, None)
2783 # authinfo[1] is passed to urllib2 password manager, and its
2786 # authinfo[1] is passed to urllib2 password manager, and its
2784 # URIs must not contain credentials. The host is passed in the
2787 # URIs must not contain credentials. The host is passed in the
2785 # URIs list because Python < 2.4.3 uses only that to search for
2788 # URIs list because Python < 2.4.3 uses only that to search for
2786 # a password.
2789 # a password.
2787 return (s, (None, (s, self.host),
2790 return (s, (None, (s, self.host),
2788 self.user, self.passwd or ''))
2791 self.user, self.passwd or ''))
2789
2792
2790 def isabs(self):
2793 def isabs(self):
2791 if self.scheme and self.scheme != 'file':
2794 if self.scheme and self.scheme != 'file':
2792 return True # remote URL
2795 return True # remote URL
2793 if hasdriveletter(self.path):
2796 if hasdriveletter(self.path):
2794 return True # absolute for our purposes - can't be joined()
2797 return True # absolute for our purposes - can't be joined()
2795 if self.path.startswith(r'\\'):
2798 if self.path.startswith(r'\\'):
2796 return True # Windows UNC path
2799 return True # Windows UNC path
2797 if self.path.startswith('/'):
2800 if self.path.startswith('/'):
2798 return True # POSIX-style
2801 return True # POSIX-style
2799 return False
2802 return False
2800
2803
2801 def localpath(self):
2804 def localpath(self):
2802 if self.scheme == 'file' or self.scheme == 'bundle':
2805 if self.scheme == 'file' or self.scheme == 'bundle':
2803 path = self.path or '/'
2806 path = self.path or '/'
2804 # For Windows, we need to promote hosts containing drive
2807 # For Windows, we need to promote hosts containing drive
2805 # letters to paths with drive letters.
2808 # letters to paths with drive letters.
2806 if hasdriveletter(self._hostport):
2809 if hasdriveletter(self._hostport):
2807 path = self._hostport + '/' + self.path
2810 path = self._hostport + '/' + self.path
2808 elif (self.host is not None and self.path
2811 elif (self.host is not None and self.path
2809 and not hasdriveletter(path)):
2812 and not hasdriveletter(path)):
2810 path = '/' + path
2813 path = '/' + path
2811 return path
2814 return path
2812 return self._origpath
2815 return self._origpath
2813
2816
2814 def islocal(self):
2817 def islocal(self):
2815 '''whether localpath will return something that posixfile can open'''
2818 '''whether localpath will return something that posixfile can open'''
2816 return (not self.scheme or self.scheme == 'file'
2819 return (not self.scheme or self.scheme == 'file'
2817 or self.scheme == 'bundle')
2820 or self.scheme == 'bundle')
2818
2821
2819 def hasscheme(path):
2822 def hasscheme(path):
2820 return bool(url(path).scheme)
2823 return bool(url(path).scheme)
2821
2824
2822 def hasdriveletter(path):
2825 def hasdriveletter(path):
2823 return path and path[1:2] == ':' and path[0:1].isalpha()
2826 return path and path[1:2] == ':' and path[0:1].isalpha()
2824
2827
2825 def urllocalpath(path):
2828 def urllocalpath(path):
2826 return url(path, parsequery=False, parsefragment=False).localpath()
2829 return url(path, parsequery=False, parsefragment=False).localpath()
2827
2830
2828 def hidepassword(u):
2831 def hidepassword(u):
2829 '''hide user credential in a url string'''
2832 '''hide user credential in a url string'''
2830 u = url(u)
2833 u = url(u)
2831 if u.passwd:
2834 if u.passwd:
2832 u.passwd = '***'
2835 u.passwd = '***'
2833 return str(u)
2836 return str(u)
2834
2837
2835 def removeauth(u):
2838 def removeauth(u):
2836 '''remove all authentication information from a url string'''
2839 '''remove all authentication information from a url string'''
2837 u = url(u)
2840 u = url(u)
2838 u.user = u.passwd = None
2841 u.user = u.passwd = None
2839 return str(u)
2842 return str(u)
2840
2843
2841 timecount = unitcountfn(
2844 timecount = unitcountfn(
2842 (1, 1e3, _('%.0f s')),
2845 (1, 1e3, _('%.0f s')),
2843 (100, 1, _('%.1f s')),
2846 (100, 1, _('%.1f s')),
2844 (10, 1, _('%.2f s')),
2847 (10, 1, _('%.2f s')),
2845 (1, 1, _('%.3f s')),
2848 (1, 1, _('%.3f s')),
2846 (100, 0.001, _('%.1f ms')),
2849 (100, 0.001, _('%.1f ms')),
2847 (10, 0.001, _('%.2f ms')),
2850 (10, 0.001, _('%.2f ms')),
2848 (1, 0.001, _('%.3f ms')),
2851 (1, 0.001, _('%.3f ms')),
2849 (100, 0.000001, _('%.1f us')),
2852 (100, 0.000001, _('%.1f us')),
2850 (10, 0.000001, _('%.2f us')),
2853 (10, 0.000001, _('%.2f us')),
2851 (1, 0.000001, _('%.3f us')),
2854 (1, 0.000001, _('%.3f us')),
2852 (100, 0.000000001, _('%.1f ns')),
2855 (100, 0.000000001, _('%.1f ns')),
2853 (10, 0.000000001, _('%.2f ns')),
2856 (10, 0.000000001, _('%.2f ns')),
2854 (1, 0.000000001, _('%.3f ns')),
2857 (1, 0.000000001, _('%.3f ns')),
2855 )
2858 )
2856
2859
2857 _timenesting = [0]
2860 _timenesting = [0]
2858
2861
2859 def timed(func):
2862 def timed(func):
2860 '''Report the execution time of a function call to stderr.
2863 '''Report the execution time of a function call to stderr.
2861
2864
2862 During development, use as a decorator when you need to measure
2865 During development, use as a decorator when you need to measure
2863 the cost of a function, e.g. as follows:
2866 the cost of a function, e.g. as follows:
2864
2867
2865 @util.timed
2868 @util.timed
2866 def foo(a, b, c):
2869 def foo(a, b, c):
2867 pass
2870 pass
2868 '''
2871 '''
2869
2872
2870 def wrapper(*args, **kwargs):
2873 def wrapper(*args, **kwargs):
2871 start = timer()
2874 start = timer()
2872 indent = 2
2875 indent = 2
2873 _timenesting[0] += indent
2876 _timenesting[0] += indent
2874 try:
2877 try:
2875 return func(*args, **kwargs)
2878 return func(*args, **kwargs)
2876 finally:
2879 finally:
2877 elapsed = timer() - start
2880 elapsed = timer() - start
2878 _timenesting[0] -= indent
2881 _timenesting[0] -= indent
2879 stderr.write('%s%s: %s\n' %
2882 stderr.write('%s%s: %s\n' %
2880 (' ' * _timenesting[0], func.__name__,
2883 (' ' * _timenesting[0], func.__name__,
2881 timecount(elapsed)))
2884 timecount(elapsed)))
2882 return wrapper
2885 return wrapper
2883
2886
2884 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2887 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2885 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2888 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2886
2889
2887 def sizetoint(s):
2890 def sizetoint(s):
2888 '''Convert a space specifier to a byte count.
2891 '''Convert a space specifier to a byte count.
2889
2892
2890 >>> sizetoint('30')
2893 >>> sizetoint('30')
2891 30
2894 30
2892 >>> sizetoint('2.2kb')
2895 >>> sizetoint('2.2kb')
2893 2252
2896 2252
2894 >>> sizetoint('6M')
2897 >>> sizetoint('6M')
2895 6291456
2898 6291456
2896 '''
2899 '''
2897 t = s.strip().lower()
2900 t = s.strip().lower()
2898 try:
2901 try:
2899 for k, u in _sizeunits:
2902 for k, u in _sizeunits:
2900 if t.endswith(k):
2903 if t.endswith(k):
2901 return int(float(t[:-len(k)]) * u)
2904 return int(float(t[:-len(k)]) * u)
2902 return int(t)
2905 return int(t)
2903 except ValueError:
2906 except ValueError:
2904 raise error.ParseError(_("couldn't parse size: %s") % s)
2907 raise error.ParseError(_("couldn't parse size: %s") % s)
2905
2908
2906 class hooks(object):
2909 class hooks(object):
2907 '''A collection of hook functions that can be used to extend a
2910 '''A collection of hook functions that can be used to extend a
2908 function's behavior. Hooks are called in lexicographic order,
2911 function's behavior. Hooks are called in lexicographic order,
2909 based on the names of their sources.'''
2912 based on the names of their sources.'''
2910
2913
2911 def __init__(self):
2914 def __init__(self):
2912 self._hooks = []
2915 self._hooks = []
2913
2916
2914 def add(self, source, hook):
2917 def add(self, source, hook):
2915 self._hooks.append((source, hook))
2918 self._hooks.append((source, hook))
2916
2919
2917 def __call__(self, *args):
2920 def __call__(self, *args):
2918 self._hooks.sort(key=lambda x: x[0])
2921 self._hooks.sort(key=lambda x: x[0])
2919 results = []
2922 results = []
2920 for source, hook in self._hooks:
2923 for source, hook in self._hooks:
2921 results.append(hook(*args))
2924 results.append(hook(*args))
2922 return results
2925 return results
2923
2926
2924 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
2927 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
2925 '''Yields lines for a nicely formatted stacktrace.
2928 '''Yields lines for a nicely formatted stacktrace.
2926 Skips the 'skip' last entries, then return the last 'depth' entries.
2929 Skips the 'skip' last entries, then return the last 'depth' entries.
2927 Each file+linenumber is formatted according to fileline.
2930 Each file+linenumber is formatted according to fileline.
2928 Each line is formatted according to line.
2931 Each line is formatted according to line.
2929 If line is None, it yields:
2932 If line is None, it yields:
2930 length of longest filepath+line number,
2933 length of longest filepath+line number,
2931 filepath+linenumber,
2934 filepath+linenumber,
2932 function
2935 function
2933
2936
2934 Not be used in production code but very convenient while developing.
2937 Not be used in production code but very convenient while developing.
2935 '''
2938 '''
2936 entries = [(fileline % (fn, ln), func)
2939 entries = [(fileline % (fn, ln), func)
2937 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
2940 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
2938 ][-depth:]
2941 ][-depth:]
2939 if entries:
2942 if entries:
2940 fnmax = max(len(entry[0]) for entry in entries)
2943 fnmax = max(len(entry[0]) for entry in entries)
2941 for fnln, func in entries:
2944 for fnln, func in entries:
2942 if line is None:
2945 if line is None:
2943 yield (fnmax, fnln, func)
2946 yield (fnmax, fnln, func)
2944 else:
2947 else:
2945 yield line % (fnmax, fnln, func)
2948 yield line % (fnmax, fnln, func)
2946
2949
2947 def debugstacktrace(msg='stacktrace', skip=0,
2950 def debugstacktrace(msg='stacktrace', skip=0,
2948 f=stderr, otherf=stdout, depth=0):
2951 f=stderr, otherf=stdout, depth=0):
2949 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2952 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2950 Skips the 'skip' entries closest to the call, then show 'depth' entries.
2953 Skips the 'skip' entries closest to the call, then show 'depth' entries.
2951 By default it will flush stdout first.
2954 By default it will flush stdout first.
2952 It can be used everywhere and intentionally does not require an ui object.
2955 It can be used everywhere and intentionally does not require an ui object.
2953 Not be used in production code but very convenient while developing.
2956 Not be used in production code but very convenient while developing.
2954 '''
2957 '''
2955 if otherf:
2958 if otherf:
2956 otherf.flush()
2959 otherf.flush()
2957 f.write('%s at:\n' % msg.rstrip())
2960 f.write('%s at:\n' % msg.rstrip())
2958 for line in getstackframes(skip + 1, depth=depth):
2961 for line in getstackframes(skip + 1, depth=depth):
2959 f.write(line)
2962 f.write(line)
2960 f.flush()
2963 f.flush()
2961
2964
2962 class dirs(object):
2965 class dirs(object):
2963 '''a multiset of directory names from a dirstate or manifest'''
2966 '''a multiset of directory names from a dirstate or manifest'''
2964
2967
2965 def __init__(self, map, skip=None):
2968 def __init__(self, map, skip=None):
2966 self._dirs = {}
2969 self._dirs = {}
2967 addpath = self.addpath
2970 addpath = self.addpath
2968 if safehasattr(map, 'iteritems') and skip is not None:
2971 if safehasattr(map, 'iteritems') and skip is not None:
2969 for f, s in map.iteritems():
2972 for f, s in map.iteritems():
2970 if s[0] != skip:
2973 if s[0] != skip:
2971 addpath(f)
2974 addpath(f)
2972 else:
2975 else:
2973 for f in map:
2976 for f in map:
2974 addpath(f)
2977 addpath(f)
2975
2978
2976 def addpath(self, path):
2979 def addpath(self, path):
2977 dirs = self._dirs
2980 dirs = self._dirs
2978 for base in finddirs(path):
2981 for base in finddirs(path):
2979 if base in dirs:
2982 if base in dirs:
2980 dirs[base] += 1
2983 dirs[base] += 1
2981 return
2984 return
2982 dirs[base] = 1
2985 dirs[base] = 1
2983
2986
2984 def delpath(self, path):
2987 def delpath(self, path):
2985 dirs = self._dirs
2988 dirs = self._dirs
2986 for base in finddirs(path):
2989 for base in finddirs(path):
2987 if dirs[base] > 1:
2990 if dirs[base] > 1:
2988 dirs[base] -= 1
2991 dirs[base] -= 1
2989 return
2992 return
2990 del dirs[base]
2993 del dirs[base]
2991
2994
2992 def __iter__(self):
2995 def __iter__(self):
2993 return iter(self._dirs)
2996 return iter(self._dirs)
2994
2997
2995 def __contains__(self, d):
2998 def __contains__(self, d):
2996 return d in self._dirs
2999 return d in self._dirs
2997
3000
2998 if safehasattr(parsers, 'dirs'):
3001 if safehasattr(parsers, 'dirs'):
2999 dirs = parsers.dirs
3002 dirs = parsers.dirs
3000
3003
3001 def finddirs(path):
3004 def finddirs(path):
3002 pos = path.rfind('/')
3005 pos = path.rfind('/')
3003 while pos != -1:
3006 while pos != -1:
3004 yield path[:pos]
3007 yield path[:pos]
3005 pos = path.rfind('/', 0, pos)
3008 pos = path.rfind('/', 0, pos)
3006
3009
3007 class ctxmanager(object):
3010 class ctxmanager(object):
3008 '''A context manager for use in 'with' blocks to allow multiple
3011 '''A context manager for use in 'with' blocks to allow multiple
3009 contexts to be entered at once. This is both safer and more
3012 contexts to be entered at once. This is both safer and more
3010 flexible than contextlib.nested.
3013 flexible than contextlib.nested.
3011
3014
3012 Once Mercurial supports Python 2.7+, this will become mostly
3015 Once Mercurial supports Python 2.7+, this will become mostly
3013 unnecessary.
3016 unnecessary.
3014 '''
3017 '''
3015
3018
3016 def __init__(self, *args):
3019 def __init__(self, *args):
3017 '''Accepts a list of no-argument functions that return context
3020 '''Accepts a list of no-argument functions that return context
3018 managers. These will be invoked at __call__ time.'''
3021 managers. These will be invoked at __call__ time.'''
3019 self._pending = args
3022 self._pending = args
3020 self._atexit = []
3023 self._atexit = []
3021
3024
3022 def __enter__(self):
3025 def __enter__(self):
3023 return self
3026 return self
3024
3027
3025 def enter(self):
3028 def enter(self):
3026 '''Create and enter context managers in the order in which they were
3029 '''Create and enter context managers in the order in which they were
3027 passed to the constructor.'''
3030 passed to the constructor.'''
3028 values = []
3031 values = []
3029 for func in self._pending:
3032 for func in self._pending:
3030 obj = func()
3033 obj = func()
3031 values.append(obj.__enter__())
3034 values.append(obj.__enter__())
3032 self._atexit.append(obj.__exit__)
3035 self._atexit.append(obj.__exit__)
3033 del self._pending
3036 del self._pending
3034 return values
3037 return values
3035
3038
3036 def atexit(self, func, *args, **kwargs):
3039 def atexit(self, func, *args, **kwargs):
3037 '''Add a function to call when this context manager exits. The
3040 '''Add a function to call when this context manager exits. The
3038 ordering of multiple atexit calls is unspecified, save that
3041 ordering of multiple atexit calls is unspecified, save that
3039 they will happen before any __exit__ functions.'''
3042 they will happen before any __exit__ functions.'''
3040 def wrapper(exc_type, exc_val, exc_tb):
3043 def wrapper(exc_type, exc_val, exc_tb):
3041 func(*args, **kwargs)
3044 func(*args, **kwargs)
3042 self._atexit.append(wrapper)
3045 self._atexit.append(wrapper)
3043 return func
3046 return func
3044
3047
3045 def __exit__(self, exc_type, exc_val, exc_tb):
3048 def __exit__(self, exc_type, exc_val, exc_tb):
3046 '''Context managers are exited in the reverse order from which
3049 '''Context managers are exited in the reverse order from which
3047 they were created.'''
3050 they were created.'''
3048 received = exc_type is not None
3051 received = exc_type is not None
3049 suppressed = False
3052 suppressed = False
3050 pending = None
3053 pending = None
3051 self._atexit.reverse()
3054 self._atexit.reverse()
3052 for exitfunc in self._atexit:
3055 for exitfunc in self._atexit:
3053 try:
3056 try:
3054 if exitfunc(exc_type, exc_val, exc_tb):
3057 if exitfunc(exc_type, exc_val, exc_tb):
3055 suppressed = True
3058 suppressed = True
3056 exc_type = None
3059 exc_type = None
3057 exc_val = None
3060 exc_val = None
3058 exc_tb = None
3061 exc_tb = None
3059 except BaseException:
3062 except BaseException:
3060 pending = sys.exc_info()
3063 pending = sys.exc_info()
3061 exc_type, exc_val, exc_tb = pending = sys.exc_info()
3064 exc_type, exc_val, exc_tb = pending = sys.exc_info()
3062 del self._atexit
3065 del self._atexit
3063 if pending:
3066 if pending:
3064 raise exc_val
3067 raise exc_val
3065 return received and suppressed
3068 return received and suppressed
3066
3069
3067 # compression code
3070 # compression code
3068
3071
3069 SERVERROLE = 'server'
3072 SERVERROLE = 'server'
3070 CLIENTROLE = 'client'
3073 CLIENTROLE = 'client'
3071
3074
3072 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3075 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3073 (u'name', u'serverpriority',
3076 (u'name', u'serverpriority',
3074 u'clientpriority'))
3077 u'clientpriority'))
3075
3078
3076 class compressormanager(object):
3079 class compressormanager(object):
3077 """Holds registrations of various compression engines.
3080 """Holds registrations of various compression engines.
3078
3081
3079 This class essentially abstracts the differences between compression
3082 This class essentially abstracts the differences between compression
3080 engines to allow new compression formats to be added easily, possibly from
3083 engines to allow new compression formats to be added easily, possibly from
3081 extensions.
3084 extensions.
3082
3085
3083 Compressors are registered against the global instance by calling its
3086 Compressors are registered against the global instance by calling its
3084 ``register()`` method.
3087 ``register()`` method.
3085 """
3088 """
3086 def __init__(self):
3089 def __init__(self):
3087 self._engines = {}
3090 self._engines = {}
3088 # Bundle spec human name to engine name.
3091 # Bundle spec human name to engine name.
3089 self._bundlenames = {}
3092 self._bundlenames = {}
3090 # Internal bundle identifier to engine name.
3093 # Internal bundle identifier to engine name.
3091 self._bundletypes = {}
3094 self._bundletypes = {}
3092 # Revlog header to engine name.
3095 # Revlog header to engine name.
3093 self._revlogheaders = {}
3096 self._revlogheaders = {}
3094 # Wire proto identifier to engine name.
3097 # Wire proto identifier to engine name.
3095 self._wiretypes = {}
3098 self._wiretypes = {}
3096
3099
3097 def __getitem__(self, key):
3100 def __getitem__(self, key):
3098 return self._engines[key]
3101 return self._engines[key]
3099
3102
3100 def __contains__(self, key):
3103 def __contains__(self, key):
3101 return key in self._engines
3104 return key in self._engines
3102
3105
3103 def __iter__(self):
3106 def __iter__(self):
3104 return iter(self._engines.keys())
3107 return iter(self._engines.keys())
3105
3108
3106 def register(self, engine):
3109 def register(self, engine):
3107 """Register a compression engine with the manager.
3110 """Register a compression engine with the manager.
3108
3111
3109 The argument must be a ``compressionengine`` instance.
3112 The argument must be a ``compressionengine`` instance.
3110 """
3113 """
3111 if not isinstance(engine, compressionengine):
3114 if not isinstance(engine, compressionengine):
3112 raise ValueError(_('argument must be a compressionengine'))
3115 raise ValueError(_('argument must be a compressionengine'))
3113
3116
3114 name = engine.name()
3117 name = engine.name()
3115
3118
3116 if name in self._engines:
3119 if name in self._engines:
3117 raise error.Abort(_('compression engine %s already registered') %
3120 raise error.Abort(_('compression engine %s already registered') %
3118 name)
3121 name)
3119
3122
3120 bundleinfo = engine.bundletype()
3123 bundleinfo = engine.bundletype()
3121 if bundleinfo:
3124 if bundleinfo:
3122 bundlename, bundletype = bundleinfo
3125 bundlename, bundletype = bundleinfo
3123
3126
3124 if bundlename in self._bundlenames:
3127 if bundlename in self._bundlenames:
3125 raise error.Abort(_('bundle name %s already registered') %
3128 raise error.Abort(_('bundle name %s already registered') %
3126 bundlename)
3129 bundlename)
3127 if bundletype in self._bundletypes:
3130 if bundletype in self._bundletypes:
3128 raise error.Abort(_('bundle type %s already registered by %s') %
3131 raise error.Abort(_('bundle type %s already registered by %s') %
3129 (bundletype, self._bundletypes[bundletype]))
3132 (bundletype, self._bundletypes[bundletype]))
3130
3133
3131 # No external facing name declared.
3134 # No external facing name declared.
3132 if bundlename:
3135 if bundlename:
3133 self._bundlenames[bundlename] = name
3136 self._bundlenames[bundlename] = name
3134
3137
3135 self._bundletypes[bundletype] = name
3138 self._bundletypes[bundletype] = name
3136
3139
3137 wiresupport = engine.wireprotosupport()
3140 wiresupport = engine.wireprotosupport()
3138 if wiresupport:
3141 if wiresupport:
3139 wiretype = wiresupport.name
3142 wiretype = wiresupport.name
3140 if wiretype in self._wiretypes:
3143 if wiretype in self._wiretypes:
3141 raise error.Abort(_('wire protocol compression %s already '
3144 raise error.Abort(_('wire protocol compression %s already '
3142 'registered by %s') %
3145 'registered by %s') %
3143 (wiretype, self._wiretypes[wiretype]))
3146 (wiretype, self._wiretypes[wiretype]))
3144
3147
3145 self._wiretypes[wiretype] = name
3148 self._wiretypes[wiretype] = name
3146
3149
3147 revlogheader = engine.revlogheader()
3150 revlogheader = engine.revlogheader()
3148 if revlogheader and revlogheader in self._revlogheaders:
3151 if revlogheader and revlogheader in self._revlogheaders:
3149 raise error.Abort(_('revlog header %s already registered by %s') %
3152 raise error.Abort(_('revlog header %s already registered by %s') %
3150 (revlogheader, self._revlogheaders[revlogheader]))
3153 (revlogheader, self._revlogheaders[revlogheader]))
3151
3154
3152 if revlogheader:
3155 if revlogheader:
3153 self._revlogheaders[revlogheader] = name
3156 self._revlogheaders[revlogheader] = name
3154
3157
3155 self._engines[name] = engine
3158 self._engines[name] = engine
3156
3159
3157 @property
3160 @property
3158 def supportedbundlenames(self):
3161 def supportedbundlenames(self):
3159 return set(self._bundlenames.keys())
3162 return set(self._bundlenames.keys())
3160
3163
3161 @property
3164 @property
3162 def supportedbundletypes(self):
3165 def supportedbundletypes(self):
3163 return set(self._bundletypes.keys())
3166 return set(self._bundletypes.keys())
3164
3167
3165 def forbundlename(self, bundlename):
3168 def forbundlename(self, bundlename):
3166 """Obtain a compression engine registered to a bundle name.
3169 """Obtain a compression engine registered to a bundle name.
3167
3170
3168 Will raise KeyError if the bundle type isn't registered.
3171 Will raise KeyError if the bundle type isn't registered.
3169
3172
3170 Will abort if the engine is known but not available.
3173 Will abort if the engine is known but not available.
3171 """
3174 """
3172 engine = self._engines[self._bundlenames[bundlename]]
3175 engine = self._engines[self._bundlenames[bundlename]]
3173 if not engine.available():
3176 if not engine.available():
3174 raise error.Abort(_('compression engine %s could not be loaded') %
3177 raise error.Abort(_('compression engine %s could not be loaded') %
3175 engine.name())
3178 engine.name())
3176 return engine
3179 return engine
3177
3180
3178 def forbundletype(self, bundletype):
3181 def forbundletype(self, bundletype):
3179 """Obtain a compression engine registered to a bundle type.
3182 """Obtain a compression engine registered to a bundle type.
3180
3183
3181 Will raise KeyError if the bundle type isn't registered.
3184 Will raise KeyError if the bundle type isn't registered.
3182
3185
3183 Will abort if the engine is known but not available.
3186 Will abort if the engine is known but not available.
3184 """
3187 """
3185 engine = self._engines[self._bundletypes[bundletype]]
3188 engine = self._engines[self._bundletypes[bundletype]]
3186 if not engine.available():
3189 if not engine.available():
3187 raise error.Abort(_('compression engine %s could not be loaded') %
3190 raise error.Abort(_('compression engine %s could not be loaded') %
3188 engine.name())
3191 engine.name())
3189 return engine
3192 return engine
3190
3193
3191 def supportedwireengines(self, role, onlyavailable=True):
3194 def supportedwireengines(self, role, onlyavailable=True):
3192 """Obtain compression engines that support the wire protocol.
3195 """Obtain compression engines that support the wire protocol.
3193
3196
3194 Returns a list of engines in prioritized order, most desired first.
3197 Returns a list of engines in prioritized order, most desired first.
3195
3198
3196 If ``onlyavailable`` is set, filter out engines that can't be
3199 If ``onlyavailable`` is set, filter out engines that can't be
3197 loaded.
3200 loaded.
3198 """
3201 """
3199 assert role in (SERVERROLE, CLIENTROLE)
3202 assert role in (SERVERROLE, CLIENTROLE)
3200
3203
3201 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3204 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3202
3205
3203 engines = [self._engines[e] for e in self._wiretypes.values()]
3206 engines = [self._engines[e] for e in self._wiretypes.values()]
3204 if onlyavailable:
3207 if onlyavailable:
3205 engines = [e for e in engines if e.available()]
3208 engines = [e for e in engines if e.available()]
3206
3209
3207 def getkey(e):
3210 def getkey(e):
3208 # Sort first by priority, highest first. In case of tie, sort
3211 # Sort first by priority, highest first. In case of tie, sort
3209 # alphabetically. This is arbitrary, but ensures output is
3212 # alphabetically. This is arbitrary, but ensures output is
3210 # stable.
3213 # stable.
3211 w = e.wireprotosupport()
3214 w = e.wireprotosupport()
3212 return -1 * getattr(w, attr), w.name
3215 return -1 * getattr(w, attr), w.name
3213
3216
3214 return list(sorted(engines, key=getkey))
3217 return list(sorted(engines, key=getkey))
3215
3218
3216 def forwiretype(self, wiretype):
3219 def forwiretype(self, wiretype):
3217 engine = self._engines[self._wiretypes[wiretype]]
3220 engine = self._engines[self._wiretypes[wiretype]]
3218 if not engine.available():
3221 if not engine.available():
3219 raise error.Abort(_('compression engine %s could not be loaded') %
3222 raise error.Abort(_('compression engine %s could not be loaded') %
3220 engine.name())
3223 engine.name())
3221 return engine
3224 return engine
3222
3225
3223 def forrevlogheader(self, header):
3226 def forrevlogheader(self, header):
3224 """Obtain a compression engine registered to a revlog header.
3227 """Obtain a compression engine registered to a revlog header.
3225
3228
3226 Will raise KeyError if the revlog header value isn't registered.
3229 Will raise KeyError if the revlog header value isn't registered.
3227 """
3230 """
3228 return self._engines[self._revlogheaders[header]]
3231 return self._engines[self._revlogheaders[header]]
3229
3232
3230 compengines = compressormanager()
3233 compengines = compressormanager()
3231
3234
3232 class compressionengine(object):
3235 class compressionengine(object):
3233 """Base class for compression engines.
3236 """Base class for compression engines.
3234
3237
3235 Compression engines must implement the interface defined by this class.
3238 Compression engines must implement the interface defined by this class.
3236 """
3239 """
3237 def name(self):
3240 def name(self):
3238 """Returns the name of the compression engine.
3241 """Returns the name of the compression engine.
3239
3242
3240 This is the key the engine is registered under.
3243 This is the key the engine is registered under.
3241
3244
3242 This method must be implemented.
3245 This method must be implemented.
3243 """
3246 """
3244 raise NotImplementedError()
3247 raise NotImplementedError()
3245
3248
3246 def available(self):
3249 def available(self):
3247 """Whether the compression engine is available.
3250 """Whether the compression engine is available.
3248
3251
3249 The intent of this method is to allow optional compression engines
3252 The intent of this method is to allow optional compression engines
3250 that may not be available in all installations (such as engines relying
3253 that may not be available in all installations (such as engines relying
3251 on C extensions that may not be present).
3254 on C extensions that may not be present).
3252 """
3255 """
3253 return True
3256 return True
3254
3257
3255 def bundletype(self):
3258 def bundletype(self):
3256 """Describes bundle identifiers for this engine.
3259 """Describes bundle identifiers for this engine.
3257
3260
3258 If this compression engine isn't supported for bundles, returns None.
3261 If this compression engine isn't supported for bundles, returns None.
3259
3262
3260 If this engine can be used for bundles, returns a 2-tuple of strings of
3263 If this engine can be used for bundles, returns a 2-tuple of strings of
3261 the user-facing "bundle spec" compression name and an internal
3264 the user-facing "bundle spec" compression name and an internal
3262 identifier used to denote the compression format within bundles. To
3265 identifier used to denote the compression format within bundles. To
3263 exclude the name from external usage, set the first element to ``None``.
3266 exclude the name from external usage, set the first element to ``None``.
3264
3267
3265 If bundle compression is supported, the class must also implement
3268 If bundle compression is supported, the class must also implement
3266 ``compressstream`` and `decompressorreader``.
3269 ``compressstream`` and `decompressorreader``.
3267 """
3270 """
3268 return None
3271 return None
3269
3272
3270 def wireprotosupport(self):
3273 def wireprotosupport(self):
3271 """Declare support for this compression format on the wire protocol.
3274 """Declare support for this compression format on the wire protocol.
3272
3275
3273 If this compression engine isn't supported for compressing wire
3276 If this compression engine isn't supported for compressing wire
3274 protocol payloads, returns None.
3277 protocol payloads, returns None.
3275
3278
3276 Otherwise, returns ``compenginewireprotosupport`` with the following
3279 Otherwise, returns ``compenginewireprotosupport`` with the following
3277 fields:
3280 fields:
3278
3281
3279 * String format identifier
3282 * String format identifier
3280 * Integer priority for the server
3283 * Integer priority for the server
3281 * Integer priority for the client
3284 * Integer priority for the client
3282
3285
3283 The integer priorities are used to order the advertisement of format
3286 The integer priorities are used to order the advertisement of format
3284 support by server and client. The highest integer is advertised
3287 support by server and client. The highest integer is advertised
3285 first. Integers with non-positive values aren't advertised.
3288 first. Integers with non-positive values aren't advertised.
3286
3289
3287 The priority values are somewhat arbitrary and only used for default
3290 The priority values are somewhat arbitrary and only used for default
3288 ordering. The relative order can be changed via config options.
3291 ordering. The relative order can be changed via config options.
3289
3292
3290 If wire protocol compression is supported, the class must also implement
3293 If wire protocol compression is supported, the class must also implement
3291 ``compressstream`` and ``decompressorreader``.
3294 ``compressstream`` and ``decompressorreader``.
3292 """
3295 """
3293 return None
3296 return None
3294
3297
3295 def revlogheader(self):
3298 def revlogheader(self):
3296 """Header added to revlog chunks that identifies this engine.
3299 """Header added to revlog chunks that identifies this engine.
3297
3300
3298 If this engine can be used to compress revlogs, this method should
3301 If this engine can be used to compress revlogs, this method should
3299 return the bytes used to identify chunks compressed with this engine.
3302 return the bytes used to identify chunks compressed with this engine.
3300 Else, the method should return ``None`` to indicate it does not
3303 Else, the method should return ``None`` to indicate it does not
3301 participate in revlog compression.
3304 participate in revlog compression.
3302 """
3305 """
3303 return None
3306 return None
3304
3307
3305 def compressstream(self, it, opts=None):
3308 def compressstream(self, it, opts=None):
3306 """Compress an iterator of chunks.
3309 """Compress an iterator of chunks.
3307
3310
3308 The method receives an iterator (ideally a generator) of chunks of
3311 The method receives an iterator (ideally a generator) of chunks of
3309 bytes to be compressed. It returns an iterator (ideally a generator)
3312 bytes to be compressed. It returns an iterator (ideally a generator)
3310 of bytes of chunks representing the compressed output.
3313 of bytes of chunks representing the compressed output.
3311
3314
3312 Optionally accepts an argument defining how to perform compression.
3315 Optionally accepts an argument defining how to perform compression.
3313 Each engine treats this argument differently.
3316 Each engine treats this argument differently.
3314 """
3317 """
3315 raise NotImplementedError()
3318 raise NotImplementedError()
3316
3319
3317 def decompressorreader(self, fh):
3320 def decompressorreader(self, fh):
3318 """Perform decompression on a file object.
3321 """Perform decompression on a file object.
3319
3322
3320 Argument is an object with a ``read(size)`` method that returns
3323 Argument is an object with a ``read(size)`` method that returns
3321 compressed data. Return value is an object with a ``read(size)`` that
3324 compressed data. Return value is an object with a ``read(size)`` that
3322 returns uncompressed data.
3325 returns uncompressed data.
3323 """
3326 """
3324 raise NotImplementedError()
3327 raise NotImplementedError()
3325
3328
3326 def revlogcompressor(self, opts=None):
3329 def revlogcompressor(self, opts=None):
3327 """Obtain an object that can be used to compress revlog entries.
3330 """Obtain an object that can be used to compress revlog entries.
3328
3331
3329 The object has a ``compress(data)`` method that compresses binary
3332 The object has a ``compress(data)`` method that compresses binary
3330 data. This method returns compressed binary data or ``None`` if
3333 data. This method returns compressed binary data or ``None`` if
3331 the data could not be compressed (too small, not compressible, etc).
3334 the data could not be compressed (too small, not compressible, etc).
3332 The returned data should have a header uniquely identifying this
3335 The returned data should have a header uniquely identifying this
3333 compression format so decompression can be routed to this engine.
3336 compression format so decompression can be routed to this engine.
3334 This header should be identified by the ``revlogheader()`` return
3337 This header should be identified by the ``revlogheader()`` return
3335 value.
3338 value.
3336
3339
3337 The object has a ``decompress(data)`` method that decompresses
3340 The object has a ``decompress(data)`` method that decompresses
3338 data. The method will only be called if ``data`` begins with
3341 data. The method will only be called if ``data`` begins with
3339 ``revlogheader()``. The method should return the raw, uncompressed
3342 ``revlogheader()``. The method should return the raw, uncompressed
3340 data or raise a ``RevlogError``.
3343 data or raise a ``RevlogError``.
3341
3344
3342 The object is reusable but is not thread safe.
3345 The object is reusable but is not thread safe.
3343 """
3346 """
3344 raise NotImplementedError()
3347 raise NotImplementedError()
3345
3348
3346 class _zlibengine(compressionengine):
3349 class _zlibengine(compressionengine):
3347 def name(self):
3350 def name(self):
3348 return 'zlib'
3351 return 'zlib'
3349
3352
3350 def bundletype(self):
3353 def bundletype(self):
3351 return 'gzip', 'GZ'
3354 return 'gzip', 'GZ'
3352
3355
3353 def wireprotosupport(self):
3356 def wireprotosupport(self):
3354 return compewireprotosupport('zlib', 20, 20)
3357 return compewireprotosupport('zlib', 20, 20)
3355
3358
3356 def revlogheader(self):
3359 def revlogheader(self):
3357 return 'x'
3360 return 'x'
3358
3361
3359 def compressstream(self, it, opts=None):
3362 def compressstream(self, it, opts=None):
3360 opts = opts or {}
3363 opts = opts or {}
3361
3364
3362 z = zlib.compressobj(opts.get('level', -1))
3365 z = zlib.compressobj(opts.get('level', -1))
3363 for chunk in it:
3366 for chunk in it:
3364 data = z.compress(chunk)
3367 data = z.compress(chunk)
3365 # Not all calls to compress emit data. It is cheaper to inspect
3368 # Not all calls to compress emit data. It is cheaper to inspect
3366 # here than to feed empty chunks through generator.
3369 # here than to feed empty chunks through generator.
3367 if data:
3370 if data:
3368 yield data
3371 yield data
3369
3372
3370 yield z.flush()
3373 yield z.flush()
3371
3374
3372 def decompressorreader(self, fh):
3375 def decompressorreader(self, fh):
3373 def gen():
3376 def gen():
3374 d = zlib.decompressobj()
3377 d = zlib.decompressobj()
3375 for chunk in filechunkiter(fh):
3378 for chunk in filechunkiter(fh):
3376 while chunk:
3379 while chunk:
3377 # Limit output size to limit memory.
3380 # Limit output size to limit memory.
3378 yield d.decompress(chunk, 2 ** 18)
3381 yield d.decompress(chunk, 2 ** 18)
3379 chunk = d.unconsumed_tail
3382 chunk = d.unconsumed_tail
3380
3383
3381 return chunkbuffer(gen())
3384 return chunkbuffer(gen())
3382
3385
3383 class zlibrevlogcompressor(object):
3386 class zlibrevlogcompressor(object):
3384 def compress(self, data):
3387 def compress(self, data):
3385 insize = len(data)
3388 insize = len(data)
3386 # Caller handles empty input case.
3389 # Caller handles empty input case.
3387 assert insize > 0
3390 assert insize > 0
3388
3391
3389 if insize < 44:
3392 if insize < 44:
3390 return None
3393 return None
3391
3394
3392 elif insize <= 1000000:
3395 elif insize <= 1000000:
3393 compressed = zlib.compress(data)
3396 compressed = zlib.compress(data)
3394 if len(compressed) < insize:
3397 if len(compressed) < insize:
3395 return compressed
3398 return compressed
3396 return None
3399 return None
3397
3400
3398 # zlib makes an internal copy of the input buffer, doubling
3401 # zlib makes an internal copy of the input buffer, doubling
3399 # memory usage for large inputs. So do streaming compression
3402 # memory usage for large inputs. So do streaming compression
3400 # on large inputs.
3403 # on large inputs.
3401 else:
3404 else:
3402 z = zlib.compressobj()
3405 z = zlib.compressobj()
3403 parts = []
3406 parts = []
3404 pos = 0
3407 pos = 0
3405 while pos < insize:
3408 while pos < insize:
3406 pos2 = pos + 2**20
3409 pos2 = pos + 2**20
3407 parts.append(z.compress(data[pos:pos2]))
3410 parts.append(z.compress(data[pos:pos2]))
3408 pos = pos2
3411 pos = pos2
3409 parts.append(z.flush())
3412 parts.append(z.flush())
3410
3413
3411 if sum(map(len, parts)) < insize:
3414 if sum(map(len, parts)) < insize:
3412 return ''.join(parts)
3415 return ''.join(parts)
3413 return None
3416 return None
3414
3417
3415 def decompress(self, data):
3418 def decompress(self, data):
3416 try:
3419 try:
3417 return zlib.decompress(data)
3420 return zlib.decompress(data)
3418 except zlib.error as e:
3421 except zlib.error as e:
3419 raise error.RevlogError(_('revlog decompress error: %s') %
3422 raise error.RevlogError(_('revlog decompress error: %s') %
3420 str(e))
3423 str(e))
3421
3424
3422 def revlogcompressor(self, opts=None):
3425 def revlogcompressor(self, opts=None):
3423 return self.zlibrevlogcompressor()
3426 return self.zlibrevlogcompressor()
3424
3427
3425 compengines.register(_zlibengine())
3428 compengines.register(_zlibengine())
3426
3429
3427 class _bz2engine(compressionengine):
3430 class _bz2engine(compressionengine):
3428 def name(self):
3431 def name(self):
3429 return 'bz2'
3432 return 'bz2'
3430
3433
3431 def bundletype(self):
3434 def bundletype(self):
3432 return 'bzip2', 'BZ'
3435 return 'bzip2', 'BZ'
3433
3436
3434 # We declare a protocol name but don't advertise by default because
3437 # We declare a protocol name but don't advertise by default because
3435 # it is slow.
3438 # it is slow.
3436 def wireprotosupport(self):
3439 def wireprotosupport(self):
3437 return compewireprotosupport('bzip2', 0, 0)
3440 return compewireprotosupport('bzip2', 0, 0)
3438
3441
3439 def compressstream(self, it, opts=None):
3442 def compressstream(self, it, opts=None):
3440 opts = opts or {}
3443 opts = opts or {}
3441 z = bz2.BZ2Compressor(opts.get('level', 9))
3444 z = bz2.BZ2Compressor(opts.get('level', 9))
3442 for chunk in it:
3445 for chunk in it:
3443 data = z.compress(chunk)
3446 data = z.compress(chunk)
3444 if data:
3447 if data:
3445 yield data
3448 yield data
3446
3449
3447 yield z.flush()
3450 yield z.flush()
3448
3451
3449 def decompressorreader(self, fh):
3452 def decompressorreader(self, fh):
3450 def gen():
3453 def gen():
3451 d = bz2.BZ2Decompressor()
3454 d = bz2.BZ2Decompressor()
3452 for chunk in filechunkiter(fh):
3455 for chunk in filechunkiter(fh):
3453 yield d.decompress(chunk)
3456 yield d.decompress(chunk)
3454
3457
3455 return chunkbuffer(gen())
3458 return chunkbuffer(gen())
3456
3459
3457 compengines.register(_bz2engine())
3460 compengines.register(_bz2engine())
3458
3461
3459 class _truncatedbz2engine(compressionengine):
3462 class _truncatedbz2engine(compressionengine):
3460 def name(self):
3463 def name(self):
3461 return 'bz2truncated'
3464 return 'bz2truncated'
3462
3465
3463 def bundletype(self):
3466 def bundletype(self):
3464 return None, '_truncatedBZ'
3467 return None, '_truncatedBZ'
3465
3468
3466 # We don't implement compressstream because it is hackily handled elsewhere.
3469 # We don't implement compressstream because it is hackily handled elsewhere.
3467
3470
3468 def decompressorreader(self, fh):
3471 def decompressorreader(self, fh):
3469 def gen():
3472 def gen():
3470 # The input stream doesn't have the 'BZ' header. So add it back.
3473 # The input stream doesn't have the 'BZ' header. So add it back.
3471 d = bz2.BZ2Decompressor()
3474 d = bz2.BZ2Decompressor()
3472 d.decompress('BZ')
3475 d.decompress('BZ')
3473 for chunk in filechunkiter(fh):
3476 for chunk in filechunkiter(fh):
3474 yield d.decompress(chunk)
3477 yield d.decompress(chunk)
3475
3478
3476 return chunkbuffer(gen())
3479 return chunkbuffer(gen())
3477
3480
3478 compengines.register(_truncatedbz2engine())
3481 compengines.register(_truncatedbz2engine())
3479
3482
3480 class _noopengine(compressionengine):
3483 class _noopengine(compressionengine):
3481 def name(self):
3484 def name(self):
3482 return 'none'
3485 return 'none'
3483
3486
3484 def bundletype(self):
3487 def bundletype(self):
3485 return 'none', 'UN'
3488 return 'none', 'UN'
3486
3489
3487 # Clients always support uncompressed payloads. Servers don't because
3490 # Clients always support uncompressed payloads. Servers don't because
3488 # unless you are on a fast network, uncompressed payloads can easily
3491 # unless you are on a fast network, uncompressed payloads can easily
3489 # saturate your network pipe.
3492 # saturate your network pipe.
3490 def wireprotosupport(self):
3493 def wireprotosupport(self):
3491 return compewireprotosupport('none', 0, 10)
3494 return compewireprotosupport('none', 0, 10)
3492
3495
3493 # We don't implement revlogheader because it is handled specially
3496 # We don't implement revlogheader because it is handled specially
3494 # in the revlog class.
3497 # in the revlog class.
3495
3498
3496 def compressstream(self, it, opts=None):
3499 def compressstream(self, it, opts=None):
3497 return it
3500 return it
3498
3501
3499 def decompressorreader(self, fh):
3502 def decompressorreader(self, fh):
3500 return fh
3503 return fh
3501
3504
3502 class nooprevlogcompressor(object):
3505 class nooprevlogcompressor(object):
3503 def compress(self, data):
3506 def compress(self, data):
3504 return None
3507 return None
3505
3508
3506 def revlogcompressor(self, opts=None):
3509 def revlogcompressor(self, opts=None):
3507 return self.nooprevlogcompressor()
3510 return self.nooprevlogcompressor()
3508
3511
3509 compengines.register(_noopengine())
3512 compengines.register(_noopengine())
3510
3513
3511 class _zstdengine(compressionengine):
3514 class _zstdengine(compressionengine):
3512 def name(self):
3515 def name(self):
3513 return 'zstd'
3516 return 'zstd'
3514
3517
3515 @propertycache
3518 @propertycache
3516 def _module(self):
3519 def _module(self):
3517 # Not all installs have the zstd module available. So defer importing
3520 # Not all installs have the zstd module available. So defer importing
3518 # until first access.
3521 # until first access.
3519 try:
3522 try:
3520 from . import zstd
3523 from . import zstd
3521 # Force delayed import.
3524 # Force delayed import.
3522 zstd.__version__
3525 zstd.__version__
3523 return zstd
3526 return zstd
3524 except ImportError:
3527 except ImportError:
3525 return None
3528 return None
3526
3529
3527 def available(self):
3530 def available(self):
3528 return bool(self._module)
3531 return bool(self._module)
3529
3532
3530 def bundletype(self):
3533 def bundletype(self):
3531 return 'zstd', 'ZS'
3534 return 'zstd', 'ZS'
3532
3535
3533 def wireprotosupport(self):
3536 def wireprotosupport(self):
3534 return compewireprotosupport('zstd', 50, 50)
3537 return compewireprotosupport('zstd', 50, 50)
3535
3538
3536 def revlogheader(self):
3539 def revlogheader(self):
3537 return '\x28'
3540 return '\x28'
3538
3541
3539 def compressstream(self, it, opts=None):
3542 def compressstream(self, it, opts=None):
3540 opts = opts or {}
3543 opts = opts or {}
3541 # zstd level 3 is almost always significantly faster than zlib
3544 # zstd level 3 is almost always significantly faster than zlib
3542 # while providing no worse compression. It strikes a good balance
3545 # while providing no worse compression. It strikes a good balance
3543 # between speed and compression.
3546 # between speed and compression.
3544 level = opts.get('level', 3)
3547 level = opts.get('level', 3)
3545
3548
3546 zstd = self._module
3549 zstd = self._module
3547 z = zstd.ZstdCompressor(level=level).compressobj()
3550 z = zstd.ZstdCompressor(level=level).compressobj()
3548 for chunk in it:
3551 for chunk in it:
3549 data = z.compress(chunk)
3552 data = z.compress(chunk)
3550 if data:
3553 if data:
3551 yield data
3554 yield data
3552
3555
3553 yield z.flush()
3556 yield z.flush()
3554
3557
3555 def decompressorreader(self, fh):
3558 def decompressorreader(self, fh):
3556 zstd = self._module
3559 zstd = self._module
3557 dctx = zstd.ZstdDecompressor()
3560 dctx = zstd.ZstdDecompressor()
3558 return chunkbuffer(dctx.read_from(fh))
3561 return chunkbuffer(dctx.read_from(fh))
3559
3562
3560 class zstdrevlogcompressor(object):
3563 class zstdrevlogcompressor(object):
3561 def __init__(self, zstd, level=3):
3564 def __init__(self, zstd, level=3):
3562 # Writing the content size adds a few bytes to the output. However,
3565 # Writing the content size adds a few bytes to the output. However,
3563 # it allows decompression to be more optimal since we can
3566 # it allows decompression to be more optimal since we can
3564 # pre-allocate a buffer to hold the result.
3567 # pre-allocate a buffer to hold the result.
3565 self._cctx = zstd.ZstdCompressor(level=level,
3568 self._cctx = zstd.ZstdCompressor(level=level,
3566 write_content_size=True)
3569 write_content_size=True)
3567 self._dctx = zstd.ZstdDecompressor()
3570 self._dctx = zstd.ZstdDecompressor()
3568 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3571 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3569 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3572 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3570
3573
3571 def compress(self, data):
3574 def compress(self, data):
3572 insize = len(data)
3575 insize = len(data)
3573 # Caller handles empty input case.
3576 # Caller handles empty input case.
3574 assert insize > 0
3577 assert insize > 0
3575
3578
3576 if insize < 50:
3579 if insize < 50:
3577 return None
3580 return None
3578
3581
3579 elif insize <= 1000000:
3582 elif insize <= 1000000:
3580 compressed = self._cctx.compress(data)
3583 compressed = self._cctx.compress(data)
3581 if len(compressed) < insize:
3584 if len(compressed) < insize:
3582 return compressed
3585 return compressed
3583 return None
3586 return None
3584 else:
3587 else:
3585 z = self._cctx.compressobj()
3588 z = self._cctx.compressobj()
3586 chunks = []
3589 chunks = []
3587 pos = 0
3590 pos = 0
3588 while pos < insize:
3591 while pos < insize:
3589 pos2 = pos + self._compinsize
3592 pos2 = pos + self._compinsize
3590 chunk = z.compress(data[pos:pos2])
3593 chunk = z.compress(data[pos:pos2])
3591 if chunk:
3594 if chunk:
3592 chunks.append(chunk)
3595 chunks.append(chunk)
3593 pos = pos2
3596 pos = pos2
3594 chunks.append(z.flush())
3597 chunks.append(z.flush())
3595
3598
3596 if sum(map(len, chunks)) < insize:
3599 if sum(map(len, chunks)) < insize:
3597 return ''.join(chunks)
3600 return ''.join(chunks)
3598 return None
3601 return None
3599
3602
3600 def decompress(self, data):
3603 def decompress(self, data):
3601 insize = len(data)
3604 insize = len(data)
3602
3605
3603 try:
3606 try:
3604 # This was measured to be faster than other streaming
3607 # This was measured to be faster than other streaming
3605 # decompressors.
3608 # decompressors.
3606 dobj = self._dctx.decompressobj()
3609 dobj = self._dctx.decompressobj()
3607 chunks = []
3610 chunks = []
3608 pos = 0
3611 pos = 0
3609 while pos < insize:
3612 while pos < insize:
3610 pos2 = pos + self._decompinsize
3613 pos2 = pos + self._decompinsize
3611 chunk = dobj.decompress(data[pos:pos2])
3614 chunk = dobj.decompress(data[pos:pos2])
3612 if chunk:
3615 if chunk:
3613 chunks.append(chunk)
3616 chunks.append(chunk)
3614 pos = pos2
3617 pos = pos2
3615 # Frame should be exhausted, so no finish() API.
3618 # Frame should be exhausted, so no finish() API.
3616
3619
3617 return ''.join(chunks)
3620 return ''.join(chunks)
3618 except Exception as e:
3621 except Exception as e:
3619 raise error.RevlogError(_('revlog decompress error: %s') %
3622 raise error.RevlogError(_('revlog decompress error: %s') %
3620 str(e))
3623 str(e))
3621
3624
3622 def revlogcompressor(self, opts=None):
3625 def revlogcompressor(self, opts=None):
3623 opts = opts or {}
3626 opts = opts or {}
3624 return self.zstdrevlogcompressor(self._module,
3627 return self.zstdrevlogcompressor(self._module,
3625 level=opts.get('level', 3))
3628 level=opts.get('level', 3))
3626
3629
3627 compengines.register(_zstdengine())
3630 compengines.register(_zstdengine())
3628
3631
3629 # convenient shortcut
3632 # convenient shortcut
3630 dst = debugstacktrace
3633 dst = debugstacktrace
@@ -1,633 +1,636
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import errno
3 import errno
4 import os
4 import os
5 import re
5 import re
6 import socket
6 import socket
7 import stat
7 import stat
8 import subprocess
8 import subprocess
9 import sys
9 import sys
10 import tempfile
10 import tempfile
11
11
12 tempprefix = 'hg-hghave-'
12 tempprefix = 'hg-hghave-'
13
13
14 checks = {
14 checks = {
15 "true": (lambda: True, "yak shaving"),
15 "true": (lambda: True, "yak shaving"),
16 "false": (lambda: False, "nail clipper"),
16 "false": (lambda: False, "nail clipper"),
17 }
17 }
18
18
19 def check(name, desc):
19 def check(name, desc):
20 """Registers a check function for a feature."""
20 """Registers a check function for a feature."""
21 def decorator(func):
21 def decorator(func):
22 checks[name] = (func, desc)
22 checks[name] = (func, desc)
23 return func
23 return func
24 return decorator
24 return decorator
25
25
26 def checkvers(name, desc, vers):
26 def checkvers(name, desc, vers):
27 """Registers a check function for each of a series of versions.
27 """Registers a check function for each of a series of versions.
28
28
29 vers can be a list or an iterator"""
29 vers can be a list or an iterator"""
30 def decorator(func):
30 def decorator(func):
31 def funcv(v):
31 def funcv(v):
32 def f():
32 def f():
33 return func(v)
33 return func(v)
34 return f
34 return f
35 for v in vers:
35 for v in vers:
36 v = str(v)
36 v = str(v)
37 f = funcv(v)
37 f = funcv(v)
38 checks['%s%s' % (name, v.replace('.', ''))] = (f, desc % v)
38 checks['%s%s' % (name, v.replace('.', ''))] = (f, desc % v)
39 return func
39 return func
40 return decorator
40 return decorator
41
41
42 def checkfeatures(features):
42 def checkfeatures(features):
43 result = {
43 result = {
44 'error': [],
44 'error': [],
45 'missing': [],
45 'missing': [],
46 'skipped': [],
46 'skipped': [],
47 }
47 }
48
48
49 for feature in features:
49 for feature in features:
50 negate = feature.startswith('no-')
50 negate = feature.startswith('no-')
51 if negate:
51 if negate:
52 feature = feature[3:]
52 feature = feature[3:]
53
53
54 if feature not in checks:
54 if feature not in checks:
55 result['missing'].append(feature)
55 result['missing'].append(feature)
56 continue
56 continue
57
57
58 check, desc = checks[feature]
58 check, desc = checks[feature]
59 try:
59 try:
60 available = check()
60 available = check()
61 except Exception:
61 except Exception:
62 result['error'].append('hghave check failed: %s' % feature)
62 result['error'].append('hghave check failed: %s' % feature)
63 continue
63 continue
64
64
65 if not negate and not available:
65 if not negate and not available:
66 result['skipped'].append('missing feature: %s' % desc)
66 result['skipped'].append('missing feature: %s' % desc)
67 elif negate and available:
67 elif negate and available:
68 result['skipped'].append('system supports %s' % desc)
68 result['skipped'].append('system supports %s' % desc)
69
69
70 return result
70 return result
71
71
72 def require(features):
72 def require(features):
73 """Require that features are available, exiting if not."""
73 """Require that features are available, exiting if not."""
74 result = checkfeatures(features)
74 result = checkfeatures(features)
75
75
76 for missing in result['missing']:
76 for missing in result['missing']:
77 sys.stderr.write('skipped: unknown feature: %s\n' % missing)
77 sys.stderr.write('skipped: unknown feature: %s\n' % missing)
78 for msg in result['skipped']:
78 for msg in result['skipped']:
79 sys.stderr.write('skipped: %s\n' % msg)
79 sys.stderr.write('skipped: %s\n' % msg)
80 for msg in result['error']:
80 for msg in result['error']:
81 sys.stderr.write('%s\n' % msg)
81 sys.stderr.write('%s\n' % msg)
82
82
83 if result['missing']:
83 if result['missing']:
84 sys.exit(2)
84 sys.exit(2)
85
85
86 if result['skipped'] or result['error']:
86 if result['skipped'] or result['error']:
87 sys.exit(1)
87 sys.exit(1)
88
88
89 def matchoutput(cmd, regexp, ignorestatus=False):
89 def matchoutput(cmd, regexp, ignorestatus=False):
90 """Return the match object if cmd executes successfully and its output
90 """Return the match object if cmd executes successfully and its output
91 is matched by the supplied regular expression.
91 is matched by the supplied regular expression.
92 """
92 """
93 r = re.compile(regexp)
93 r = re.compile(regexp)
94 try:
94 try:
95 p = subprocess.Popen(
95 p = subprocess.Popen(
96 cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
96 cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
97 except OSError as e:
97 except OSError as e:
98 if e.errno != errno.ENOENT:
98 if e.errno != errno.ENOENT:
99 raise
99 raise
100 ret = -1
100 ret = -1
101 ret = p.wait()
101 ret = p.wait()
102 s = p.stdout.read()
102 s = p.stdout.read()
103 return (ignorestatus or not ret) and r.search(s)
103 return (ignorestatus or not ret) and r.search(s)
104
104
105 @check("baz", "GNU Arch baz client")
105 @check("baz", "GNU Arch baz client")
106 def has_baz():
106 def has_baz():
107 return matchoutput('baz --version 2>&1', br'baz Bazaar version')
107 return matchoutput('baz --version 2>&1', br'baz Bazaar version')
108
108
109 @check("bzr", "Canonical's Bazaar client")
109 @check("bzr", "Canonical's Bazaar client")
110 def has_bzr():
110 def has_bzr():
111 try:
111 try:
112 import bzrlib
112 import bzrlib
113 import bzrlib.bzrdir
113 import bzrlib.bzrdir
114 import bzrlib.errors
114 import bzrlib.errors
115 import bzrlib.revision
115 import bzrlib.revision
116 import bzrlib.revisionspec
116 import bzrlib.revisionspec
117 bzrlib.revisionspec.RevisionSpec
117 bzrlib.revisionspec.RevisionSpec
118 return bzrlib.__doc__ is not None
118 return bzrlib.__doc__ is not None
119 except (AttributeError, ImportError):
119 except (AttributeError, ImportError):
120 return False
120 return False
121
121
122 @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,))
122 @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,))
123 def has_bzr_range(v):
123 def has_bzr_range(v):
124 major, minor = v.split('.')[0:2]
124 major, minor = v.split('.')[0:2]
125 try:
125 try:
126 import bzrlib
126 import bzrlib
127 return (bzrlib.__doc__ is not None
127 return (bzrlib.__doc__ is not None
128 and bzrlib.version_info[:2] >= (int(major), int(minor)))
128 and bzrlib.version_info[:2] >= (int(major), int(minor)))
129 except ImportError:
129 except ImportError:
130 return False
130 return False
131
131
132 @check("chg", "running with chg")
132 @check("chg", "running with chg")
133 def has_chg():
133 def has_chg():
134 return 'CHGHG' in os.environ
134 return 'CHGHG' in os.environ
135
135
136 @check("cvs", "cvs client/server")
136 @check("cvs", "cvs client/server")
137 def has_cvs():
137 def has_cvs():
138 re = br'Concurrent Versions System.*?server'
138 re = br'Concurrent Versions System.*?server'
139 return matchoutput('cvs --version 2>&1', re) and not has_msys()
139 return matchoutput('cvs --version 2>&1', re) and not has_msys()
140
140
141 @check("cvs112", "cvs client/server 1.12.* (not cvsnt)")
141 @check("cvs112", "cvs client/server 1.12.* (not cvsnt)")
142 def has_cvs112():
142 def has_cvs112():
143 re = br'Concurrent Versions System \(CVS\) 1.12.*?server'
143 re = br'Concurrent Versions System \(CVS\) 1.12.*?server'
144 return matchoutput('cvs --version 2>&1', re) and not has_msys()
144 return matchoutput('cvs --version 2>&1', re) and not has_msys()
145
145
146 @check("cvsnt", "cvsnt client/server")
146 @check("cvsnt", "cvsnt client/server")
147 def has_cvsnt():
147 def has_cvsnt():
148 re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)'
148 re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)'
149 return matchoutput('cvsnt --version 2>&1', re)
149 return matchoutput('cvsnt --version 2>&1', re)
150
150
151 @check("darcs", "darcs client")
151 @check("darcs", "darcs client")
152 def has_darcs():
152 def has_darcs():
153 return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True)
153 return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True)
154
154
155 @check("mtn", "monotone client (>= 1.0)")
155 @check("mtn", "monotone client (>= 1.0)")
156 def has_mtn():
156 def has_mtn():
157 return matchoutput('mtn --version', br'monotone', True) and not matchoutput(
157 return matchoutput('mtn --version', br'monotone', True) and not matchoutput(
158 'mtn --version', br'monotone 0\.', True)
158 'mtn --version', br'monotone 0\.', True)
159
159
160 @check("eol-in-paths", "end-of-lines in paths")
160 @check("eol-in-paths", "end-of-lines in paths")
161 def has_eol_in_paths():
161 def has_eol_in_paths():
162 try:
162 try:
163 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r')
163 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r')
164 os.close(fd)
164 os.close(fd)
165 os.remove(path)
165 os.remove(path)
166 return True
166 return True
167 except (IOError, OSError):
167 except (IOError, OSError):
168 return False
168 return False
169
169
170 @check("execbit", "executable bit")
170 @check("execbit", "executable bit")
171 def has_executablebit():
171 def has_executablebit():
172 try:
172 try:
173 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
173 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
174 fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
174 fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
175 try:
175 try:
176 os.close(fh)
176 os.close(fh)
177 m = os.stat(fn).st_mode & 0o777
177 m = os.stat(fn).st_mode & 0o777
178 new_file_has_exec = m & EXECFLAGS
178 new_file_has_exec = m & EXECFLAGS
179 os.chmod(fn, m ^ EXECFLAGS)
179 os.chmod(fn, m ^ EXECFLAGS)
180 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0o777) == m)
180 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0o777) == m)
181 finally:
181 finally:
182 os.unlink(fn)
182 os.unlink(fn)
183 except (IOError, OSError):
183 except (IOError, OSError):
184 # we don't care, the user probably won't be able to commit anyway
184 # we don't care, the user probably won't be able to commit anyway
185 return False
185 return False
186 return not (new_file_has_exec or exec_flags_cannot_flip)
186 return not (new_file_has_exec or exec_flags_cannot_flip)
187
187
188 @check("icasefs", "case insensitive file system")
188 @check("icasefs", "case insensitive file system")
189 def has_icasefs():
189 def has_icasefs():
190 # Stolen from mercurial.util
190 # Stolen from mercurial.util
191 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
191 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
192 os.close(fd)
192 os.close(fd)
193 try:
193 try:
194 s1 = os.stat(path)
194 s1 = os.stat(path)
195 d, b = os.path.split(path)
195 d, b = os.path.split(path)
196 p2 = os.path.join(d, b.upper())
196 p2 = os.path.join(d, b.upper())
197 if path == p2:
197 if path == p2:
198 p2 = os.path.join(d, b.lower())
198 p2 = os.path.join(d, b.lower())
199 try:
199 try:
200 s2 = os.stat(p2)
200 s2 = os.stat(p2)
201 return s2 == s1
201 return s2 == s1
202 except OSError:
202 except OSError:
203 return False
203 return False
204 finally:
204 finally:
205 os.remove(path)
205 os.remove(path)
206
206
207 @check("fifo", "named pipes")
207 @check("fifo", "named pipes")
208 def has_fifo():
208 def has_fifo():
209 if getattr(os, "mkfifo", None) is None:
209 if getattr(os, "mkfifo", None) is None:
210 return False
210 return False
211 name = tempfile.mktemp(dir='.', prefix=tempprefix)
211 name = tempfile.mktemp(dir='.', prefix=tempprefix)
212 try:
212 try:
213 os.mkfifo(name)
213 os.mkfifo(name)
214 os.unlink(name)
214 os.unlink(name)
215 return True
215 return True
216 except OSError:
216 except OSError:
217 return False
217 return False
218
218
219 @check("killdaemons", 'killdaemons.py support')
219 @check("killdaemons", 'killdaemons.py support')
220 def has_killdaemons():
220 def has_killdaemons():
221 return True
221 return True
222
222
223 @check("cacheable", "cacheable filesystem")
223 @check("cacheable", "cacheable filesystem")
224 def has_cacheable_fs():
224 def has_cacheable_fs():
225 from mercurial import util
225 from mercurial import util
226
226
227 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
227 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
228 os.close(fd)
228 os.close(fd)
229 try:
229 try:
230 return util.cachestat(path).cacheable()
230 return util.cachestat(path).cacheable()
231 finally:
231 finally:
232 os.remove(path)
232 os.remove(path)
233
233
234 @check("lsprof", "python lsprof module")
234 @check("lsprof", "python lsprof module")
235 def has_lsprof():
235 def has_lsprof():
236 try:
236 try:
237 import _lsprof
237 import _lsprof
238 _lsprof.Profiler # silence unused import warning
238 _lsprof.Profiler # silence unused import warning
239 return True
239 return True
240 except ImportError:
240 except ImportError:
241 return False
241 return False
242
242
243 def gethgversion():
243 def gethgversion():
244 m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)')
244 m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)')
245 if not m:
245 if not m:
246 return (0, 0)
246 return (0, 0)
247 return (int(m.group(1)), int(m.group(2)))
247 return (int(m.group(1)), int(m.group(2)))
248
248
249 @checkvers("hg", "Mercurial >= %s",
249 @checkvers("hg", "Mercurial >= %s",
250 list([(1.0 * x) / 10 for x in range(9, 40)]))
250 list([(1.0 * x) / 10 for x in range(9, 40)]))
251 def has_hg_range(v):
251 def has_hg_range(v):
252 major, minor = v.split('.')[0:2]
252 major, minor = v.split('.')[0:2]
253 return gethgversion() >= (int(major), int(minor))
253 return gethgversion() >= (int(major), int(minor))
254
254
255 @check("hg08", "Mercurial >= 0.8")
255 @check("hg08", "Mercurial >= 0.8")
256 def has_hg08():
256 def has_hg08():
257 if checks["hg09"][0]():
257 if checks["hg09"][0]():
258 return True
258 return True
259 return matchoutput('hg help annotate 2>&1', '--date')
259 return matchoutput('hg help annotate 2>&1', '--date')
260
260
261 @check("hg07", "Mercurial >= 0.7")
261 @check("hg07", "Mercurial >= 0.7")
262 def has_hg07():
262 def has_hg07():
263 if checks["hg08"][0]():
263 if checks["hg08"][0]():
264 return True
264 return True
265 return matchoutput('hg --version --quiet 2>&1', 'Mercurial Distributed SCM')
265 return matchoutput('hg --version --quiet 2>&1', 'Mercurial Distributed SCM')
266
266
267 @check("hg06", "Mercurial >= 0.6")
267 @check("hg06", "Mercurial >= 0.6")
268 def has_hg06():
268 def has_hg06():
269 if checks["hg07"][0]():
269 if checks["hg07"][0]():
270 return True
270 return True
271 return matchoutput('hg --version --quiet 2>&1', 'Mercurial version')
271 return matchoutput('hg --version --quiet 2>&1', 'Mercurial version')
272
272
273 @check("gettext", "GNU Gettext (msgfmt)")
273 @check("gettext", "GNU Gettext (msgfmt)")
274 def has_gettext():
274 def has_gettext():
275 return matchoutput('msgfmt --version', br'GNU gettext-tools')
275 return matchoutput('msgfmt --version', br'GNU gettext-tools')
276
276
277 @check("git", "git command line client")
277 @check("git", "git command line client")
278 def has_git():
278 def has_git():
279 return matchoutput('git --version 2>&1', br'^git version')
279 return matchoutput('git --version 2>&1', br'^git version')
280
280
281 @check("docutils", "Docutils text processing library")
281 @check("docutils", "Docutils text processing library")
282 def has_docutils():
282 def has_docutils():
283 try:
283 try:
284 import docutils.core
284 import docutils.core
285 docutils.core.publish_cmdline # silence unused import
285 docutils.core.publish_cmdline # silence unused import
286 return True
286 return True
287 except ImportError:
287 except ImportError:
288 return False
288 return False
289
289
290 def getsvnversion():
290 def getsvnversion():
291 m = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)')
291 m = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)')
292 if not m:
292 if not m:
293 return (0, 0)
293 return (0, 0)
294 return (int(m.group(1)), int(m.group(2)))
294 return (int(m.group(1)), int(m.group(2)))
295
295
296 @checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5))
296 @checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5))
297 def has_svn_range(v):
297 def has_svn_range(v):
298 major, minor = v.split('.')[0:2]
298 major, minor = v.split('.')[0:2]
299 return getsvnversion() >= (int(major), int(minor))
299 return getsvnversion() >= (int(major), int(minor))
300
300
301 @check("svn", "subversion client and admin tools")
301 @check("svn", "subversion client and admin tools")
302 def has_svn():
302 def has_svn():
303 return matchoutput('svn --version 2>&1', br'^svn, version') and \
303 return matchoutput('svn --version 2>&1', br'^svn, version') and \
304 matchoutput('svnadmin --version 2>&1', br'^svnadmin, version')
304 matchoutput('svnadmin --version 2>&1', br'^svnadmin, version')
305
305
306 @check("svn-bindings", "subversion python bindings")
306 @check("svn-bindings", "subversion python bindings")
307 def has_svn_bindings():
307 def has_svn_bindings():
308 try:
308 try:
309 import svn.core
309 import svn.core
310 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
310 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
311 if version < (1, 4):
311 if version < (1, 4):
312 return False
312 return False
313 return True
313 return True
314 except ImportError:
314 except ImportError:
315 return False
315 return False
316
316
317 @check("p4", "Perforce server and client")
317 @check("p4", "Perforce server and client")
318 def has_p4():
318 def has_p4():
319 return (matchoutput('p4 -V', br'Rev\. P4/') and
319 return (matchoutput('p4 -V', br'Rev\. P4/') and
320 matchoutput('p4d -V', br'Rev\. P4D/'))
320 matchoutput('p4d -V', br'Rev\. P4D/'))
321
321
322 @check("symlink", "symbolic links")
322 @check("symlink", "symbolic links")
323 def has_symlink():
323 def has_symlink():
324 if getattr(os, "symlink", None) is None:
324 if getattr(os, "symlink", None) is None:
325 return False
325 return False
326 name = tempfile.mktemp(dir='.', prefix=tempprefix)
326 name = tempfile.mktemp(dir='.', prefix=tempprefix)
327 try:
327 try:
328 os.symlink(".", name)
328 os.symlink(".", name)
329 os.unlink(name)
329 os.unlink(name)
330 return True
330 return True
331 except (OSError, AttributeError):
331 except (OSError, AttributeError):
332 return False
332 return False
333
333
334 @check("hardlink", "hardlinks")
334 @check("hardlink", "hardlinks")
335 def has_hardlink():
335 def has_hardlink():
336 from mercurial import util
336 from mercurial import util
337 fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
337 fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
338 os.close(fh)
338 os.close(fh)
339 name = tempfile.mktemp(dir='.', prefix=tempprefix)
339 name = tempfile.mktemp(dir='.', prefix=tempprefix)
340 try:
340 try:
341 util.oslink(fn, name)
341 util.oslink(fn, name)
342 os.unlink(name)
342 os.unlink(name)
343 return True
343 return True
344 except OSError:
344 except OSError:
345 return False
345 return False
346 finally:
346 finally:
347 os.unlink(fn)
347 os.unlink(fn)
348
348
349 @check("hardlink-whitelisted", "hardlinks on whitelisted filesystems")
349 @check("hardlink-whitelisted", "hardlinks on whitelisted filesystems")
350 def has_hardlink_whitelisted():
350 def has_hardlink_whitelisted():
351 from mercurial import util
351 from mercurial import util
352 try:
352 fstype = util.getfstype('.')
353 fstype = util.getfstype('.')
354 except OSError:
355 return False
353 return fstype in util._hardlinkfswhitelist
356 return fstype in util._hardlinkfswhitelist
354
357
355 @check("rmcwd", "can remove current working directory")
358 @check("rmcwd", "can remove current working directory")
356 def has_rmcwd():
359 def has_rmcwd():
357 ocwd = os.getcwd()
360 ocwd = os.getcwd()
358 temp = tempfile.mkdtemp(dir='.', prefix=tempprefix)
361 temp = tempfile.mkdtemp(dir='.', prefix=tempprefix)
359 try:
362 try:
360 os.chdir(temp)
363 os.chdir(temp)
361 # On Linux, 'rmdir .' isn't allowed, but the other names are okay.
364 # On Linux, 'rmdir .' isn't allowed, but the other names are okay.
362 # On Solaris and Windows, the cwd can't be removed by any names.
365 # On Solaris and Windows, the cwd can't be removed by any names.
363 os.rmdir(os.getcwd())
366 os.rmdir(os.getcwd())
364 return True
367 return True
365 except OSError:
368 except OSError:
366 return False
369 return False
367 finally:
370 finally:
368 os.chdir(ocwd)
371 os.chdir(ocwd)
369 # clean up temp dir on platforms where cwd can't be removed
372 # clean up temp dir on platforms where cwd can't be removed
370 try:
373 try:
371 os.rmdir(temp)
374 os.rmdir(temp)
372 except OSError:
375 except OSError:
373 pass
376 pass
374
377
375 @check("tla", "GNU Arch tla client")
378 @check("tla", "GNU Arch tla client")
376 def has_tla():
379 def has_tla():
377 return matchoutput('tla --version 2>&1', br'The GNU Arch Revision')
380 return matchoutput('tla --version 2>&1', br'The GNU Arch Revision')
378
381
379 @check("gpg", "gpg client")
382 @check("gpg", "gpg client")
380 def has_gpg():
383 def has_gpg():
381 return matchoutput('gpg --version 2>&1', br'GnuPG')
384 return matchoutput('gpg --version 2>&1', br'GnuPG')
382
385
383 @check("gpg2", "gpg client v2")
386 @check("gpg2", "gpg client v2")
384 def has_gpg2():
387 def has_gpg2():
385 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.')
388 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.')
386
389
387 @check("gpg21", "gpg client v2.1+")
390 @check("gpg21", "gpg client v2.1+")
388 def has_gpg21():
391 def has_gpg21():
389 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.(?!0)')
392 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.(?!0)')
390
393
391 @check("unix-permissions", "unix-style permissions")
394 @check("unix-permissions", "unix-style permissions")
392 def has_unix_permissions():
395 def has_unix_permissions():
393 d = tempfile.mkdtemp(dir='.', prefix=tempprefix)
396 d = tempfile.mkdtemp(dir='.', prefix=tempprefix)
394 try:
397 try:
395 fname = os.path.join(d, 'foo')
398 fname = os.path.join(d, 'foo')
396 for umask in (0o77, 0o07, 0o22):
399 for umask in (0o77, 0o07, 0o22):
397 os.umask(umask)
400 os.umask(umask)
398 f = open(fname, 'w')
401 f = open(fname, 'w')
399 f.close()
402 f.close()
400 mode = os.stat(fname).st_mode
403 mode = os.stat(fname).st_mode
401 os.unlink(fname)
404 os.unlink(fname)
402 if mode & 0o777 != ~umask & 0o666:
405 if mode & 0o777 != ~umask & 0o666:
403 return False
406 return False
404 return True
407 return True
405 finally:
408 finally:
406 os.rmdir(d)
409 os.rmdir(d)
407
410
408 @check("unix-socket", "AF_UNIX socket family")
411 @check("unix-socket", "AF_UNIX socket family")
409 def has_unix_socket():
412 def has_unix_socket():
410 return getattr(socket, 'AF_UNIX', None) is not None
413 return getattr(socket, 'AF_UNIX', None) is not None
411
414
412 @check("root", "root permissions")
415 @check("root", "root permissions")
413 def has_root():
416 def has_root():
414 return getattr(os, 'geteuid', None) and os.geteuid() == 0
417 return getattr(os, 'geteuid', None) and os.geteuid() == 0
415
418
416 @check("pyflakes", "Pyflakes python linter")
419 @check("pyflakes", "Pyflakes python linter")
417 def has_pyflakes():
420 def has_pyflakes():
418 return matchoutput("sh -c \"echo 'import re' 2>&1 | pyflakes\"",
421 return matchoutput("sh -c \"echo 'import re' 2>&1 | pyflakes\"",
419 br"<stdin>:1: 're' imported but unused",
422 br"<stdin>:1: 're' imported but unused",
420 True)
423 True)
421
424
422 @check("pylint", "Pylint python linter")
425 @check("pylint", "Pylint python linter")
423 def has_pylint():
426 def has_pylint():
424 return matchoutput("pylint --help",
427 return matchoutput("pylint --help",
425 br"Usage: pylint",
428 br"Usage: pylint",
426 True)
429 True)
427
430
428 @check("pygments", "Pygments source highlighting library")
431 @check("pygments", "Pygments source highlighting library")
429 def has_pygments():
432 def has_pygments():
430 try:
433 try:
431 import pygments
434 import pygments
432 pygments.highlight # silence unused import warning
435 pygments.highlight # silence unused import warning
433 return True
436 return True
434 except ImportError:
437 except ImportError:
435 return False
438 return False
436
439
437 @check("outer-repo", "outer repo")
440 @check("outer-repo", "outer repo")
438 def has_outer_repo():
441 def has_outer_repo():
439 # failing for other reasons than 'no repo' imply that there is a repo
442 # failing for other reasons than 'no repo' imply that there is a repo
440 return not matchoutput('hg root 2>&1',
443 return not matchoutput('hg root 2>&1',
441 br'abort: no repository found', True)
444 br'abort: no repository found', True)
442
445
443 @check("ssl", "ssl module available")
446 @check("ssl", "ssl module available")
444 def has_ssl():
447 def has_ssl():
445 try:
448 try:
446 import ssl
449 import ssl
447 ssl.CERT_NONE
450 ssl.CERT_NONE
448 return True
451 return True
449 except ImportError:
452 except ImportError:
450 return False
453 return False
451
454
452 @check("sslcontext", "python >= 2.7.9 ssl")
455 @check("sslcontext", "python >= 2.7.9 ssl")
453 def has_sslcontext():
456 def has_sslcontext():
454 try:
457 try:
455 import ssl
458 import ssl
456 ssl.SSLContext
459 ssl.SSLContext
457 return True
460 return True
458 except (ImportError, AttributeError):
461 except (ImportError, AttributeError):
459 return False
462 return False
460
463
461 @check("defaultcacerts", "can verify SSL certs by system's CA certs store")
464 @check("defaultcacerts", "can verify SSL certs by system's CA certs store")
462 def has_defaultcacerts():
465 def has_defaultcacerts():
463 from mercurial import sslutil, ui as uimod
466 from mercurial import sslutil, ui as uimod
464 ui = uimod.ui.load()
467 ui = uimod.ui.load()
465 return sslutil._defaultcacerts(ui) or sslutil._canloaddefaultcerts
468 return sslutil._defaultcacerts(ui) or sslutil._canloaddefaultcerts
466
469
467 @check("defaultcacertsloaded", "detected presence of loaded system CA certs")
470 @check("defaultcacertsloaded", "detected presence of loaded system CA certs")
468 def has_defaultcacertsloaded():
471 def has_defaultcacertsloaded():
469 import ssl
472 import ssl
470 from mercurial import sslutil, ui as uimod
473 from mercurial import sslutil, ui as uimod
471
474
472 if not has_defaultcacerts():
475 if not has_defaultcacerts():
473 return False
476 return False
474 if not has_sslcontext():
477 if not has_sslcontext():
475 return False
478 return False
476
479
477 ui = uimod.ui.load()
480 ui = uimod.ui.load()
478 cafile = sslutil._defaultcacerts(ui)
481 cafile = sslutil._defaultcacerts(ui)
479 ctx = ssl.create_default_context()
482 ctx = ssl.create_default_context()
480 if cafile:
483 if cafile:
481 ctx.load_verify_locations(cafile=cafile)
484 ctx.load_verify_locations(cafile=cafile)
482 else:
485 else:
483 ctx.load_default_certs()
486 ctx.load_default_certs()
484
487
485 return len(ctx.get_ca_certs()) > 0
488 return len(ctx.get_ca_certs()) > 0
486
489
487 @check("tls1.2", "TLS 1.2 protocol support")
490 @check("tls1.2", "TLS 1.2 protocol support")
488 def has_tls1_2():
491 def has_tls1_2():
489 from mercurial import sslutil
492 from mercurial import sslutil
490 return 'tls1.2' in sslutil.supportedprotocols
493 return 'tls1.2' in sslutil.supportedprotocols
491
494
492 @check("windows", "Windows")
495 @check("windows", "Windows")
493 def has_windows():
496 def has_windows():
494 return os.name == 'nt'
497 return os.name == 'nt'
495
498
496 @check("system-sh", "system() uses sh")
499 @check("system-sh", "system() uses sh")
497 def has_system_sh():
500 def has_system_sh():
498 return os.name != 'nt'
501 return os.name != 'nt'
499
502
500 @check("serve", "platform and python can manage 'hg serve -d'")
503 @check("serve", "platform and python can manage 'hg serve -d'")
501 def has_serve():
504 def has_serve():
502 return os.name != 'nt' # gross approximation
505 return os.name != 'nt' # gross approximation
503
506
504 @check("test-repo", "running tests from repository")
507 @check("test-repo", "running tests from repository")
505 def has_test_repo():
508 def has_test_repo():
506 t = os.environ["TESTDIR"]
509 t = os.environ["TESTDIR"]
507 return os.path.isdir(os.path.join(t, "..", ".hg"))
510 return os.path.isdir(os.path.join(t, "..", ".hg"))
508
511
509 @check("tic", "terminfo compiler and curses module")
512 @check("tic", "terminfo compiler and curses module")
510 def has_tic():
513 def has_tic():
511 try:
514 try:
512 import curses
515 import curses
513 curses.COLOR_BLUE
516 curses.COLOR_BLUE
514 return matchoutput('test -x "`which tic`"', br'')
517 return matchoutput('test -x "`which tic`"', br'')
515 except ImportError:
518 except ImportError:
516 return False
519 return False
517
520
518 @check("msys", "Windows with MSYS")
521 @check("msys", "Windows with MSYS")
519 def has_msys():
522 def has_msys():
520 return os.getenv('MSYSTEM')
523 return os.getenv('MSYSTEM')
521
524
522 @check("aix", "AIX")
525 @check("aix", "AIX")
523 def has_aix():
526 def has_aix():
524 return sys.platform.startswith("aix")
527 return sys.platform.startswith("aix")
525
528
526 @check("osx", "OS X")
529 @check("osx", "OS X")
527 def has_osx():
530 def has_osx():
528 return sys.platform == 'darwin'
531 return sys.platform == 'darwin'
529
532
530 @check("osxpackaging", "OS X packaging tools")
533 @check("osxpackaging", "OS X packaging tools")
531 def has_osxpackaging():
534 def has_osxpackaging():
532 try:
535 try:
533 return (matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1)
536 return (matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1)
534 and matchoutput(
537 and matchoutput(
535 'productbuild', br'Usage: productbuild ',
538 'productbuild', br'Usage: productbuild ',
536 ignorestatus=1)
539 ignorestatus=1)
537 and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1)
540 and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1)
538 and matchoutput(
541 and matchoutput(
539 'xar --help', br'Usage: xar', ignorestatus=1))
542 'xar --help', br'Usage: xar', ignorestatus=1))
540 except ImportError:
543 except ImportError:
541 return False
544 return False
542
545
543 @check("docker", "docker support")
546 @check("docker", "docker support")
544 def has_docker():
547 def has_docker():
545 pat = br'A self-sufficient runtime for'
548 pat = br'A self-sufficient runtime for'
546 if matchoutput('docker --help', pat):
549 if matchoutput('docker --help', pat):
547 if 'linux' not in sys.platform:
550 if 'linux' not in sys.platform:
548 # TODO: in theory we should be able to test docker-based
551 # TODO: in theory we should be able to test docker-based
549 # package creation on non-linux using boot2docker, but in
552 # package creation on non-linux using boot2docker, but in
550 # practice that requires extra coordination to make sure
553 # practice that requires extra coordination to make sure
551 # $TESTTEMP is going to be visible at the same path to the
554 # $TESTTEMP is going to be visible at the same path to the
552 # boot2docker VM. If we figure out how to verify that, we
555 # boot2docker VM. If we figure out how to verify that, we
553 # can use the following instead of just saying False:
556 # can use the following instead of just saying False:
554 # return 'DOCKER_HOST' in os.environ
557 # return 'DOCKER_HOST' in os.environ
555 return False
558 return False
556
559
557 return True
560 return True
558 return False
561 return False
559
562
@check("debhelper", "debian packaging tools")
def has_debhelper():
    """Report whether the full debian packaging toolchain is present.

    Requires all three of dpkg, dh (debhelper) and dh_python2.
    """
    have_dpkg = matchoutput('dpkg --version',
                            br"Debian `dpkg' package management program")
    have_dh = matchoutput('dh --help',
                          br'dh is a part of debhelper.', ignorestatus=True)
    have_dh_py2 = matchoutput('dh_python2 --help',
                              br'other supported Python versions')
    return have_dpkg and have_dh and have_dh_py2
569
572
@check("demandimport", "demandimport enabled")
def has_demandimport():
    """Report whether demandimport is in effect.

    It is considered enabled unless explicitly switched off through
    the HGDEMANDIMPORT environment variable.
    """
    return not os.environ.get('HGDEMANDIMPORT') == 'disable'
573
576
@check("absimport", "absolute_import in __future__")
def has_absimport():
    """Report whether this Python exposes __future__.absolute_import."""
    import __future__
    from mercurial import util
    return util.safehasattr(__future__, "absolute_import")
579
582
@check("py27+", "running with Python 2.7+")
def has_python27ornewer():
    """Report whether the running interpreter is Python 2.7 or newer."""
    # only (major, minor) matter; the micro version is irrelevant here
    return sys.version_info[:2] >= (2, 7)
583
586
@check("py3k", "running with Python 3.x")
def has_py3k():
    """Report whether the running interpreter is Python 3.x."""
    return sys.version_info[0] == 3
587
590
@check("py3exe", "a Python 3.x interpreter is available")
def has_python3exe():
    """Report whether a python3 binary was located by the test runner.

    The runner exports PYTHON3 when it finds one.
    """
    return 'PYTHON3' in os.environ
591
594
@check("py3pygments", "Pygments available on Python 3.x")
def has_py3pygments():
    """Report whether Pygments is importable under a Python 3 interpreter.

    If we are already on py3, reuse the regular pygments check; otherwise
    probe the external interpreter named by $PYTHON3, if any.
    """
    if has_py3k():
        return has_pygments()
    if has_python3exe():
        # just check exit status (ignoring output)
        py3 = os.environ['PYTHON3']
        return matchoutput('%s -c "import pygments"' % py3, br'')
    return False
601
604
@check("pure", "running with pure Python code")
def has_pure():
    """Report whether the pure-Python implementation is in use.

    Either the module policy forces pure modules, or the test runner
    was invoked with --pure.
    """
    env = os.environ
    return (env.get("HGMODULEPOLICY") == "py"
            or env.get("HGTEST_RUN_TESTS_PURE") == "--pure")
608
611
@check("slow", "allow slow tests")
def has_slow():
    """Report whether slow tests were explicitly enabled via HGTEST_SLOW."""
    return os.environ.get('HGTEST_SLOW') == 'slow'
612
615
@check("hypothesis", "Hypothesis automated test generation")
def has_hypothesis():
    """Report whether the hypothesis package is importable.

    Also touches hypothesis.given so a broken installation missing that
    attribute is noticed rather than silently accepted.
    """
    try:
        import hypothesis
        hypothesis.given
    except ImportError:
        return False
    return True
621
624
@check("unziplinks", "unzip(1) understands and extracts symlinks")
def unzip_understands_symlinks():
    """Report whether the installed unzip(1) is the Info-ZIP variant.

    Info-ZIP's unzip is the one known to handle symlinks in archives.
    """
    return matchoutput('unzip --help', br'Info-ZIP')
625
628
@check("zstd", "zstd Python module available")
def has_zstd():
    """Report whether Mercurial's bundled zstd extension module loads.

    Touches __version__ so a partially-built module is rejected.
    """
    try:
        import mercurial.zstd
        mercurial.zstd.__version__
    except ImportError:
        return False
    return True
General Comments 0
You need to be logged in to leave comments. Login now