scmutil: rewrite dirs in C, use if available...
Bryan O'Sullivan -
r18900:02ee846b default
@@ -0,0 +1,298 @@
1 /*
2 dirs.c - dynamic directory diddling for dirstates
3
4 Copyright 2013 Facebook
5
6 This software may be used and distributed according to the terms of
7 the GNU General Public License, incorporated herein by reference.
8 */
9
10 #define PY_SSIZE_T_CLEAN
11 #include <Python.h>
12 #include "util.h"
13
14 /*
15 * This is a multiset of directory names, built from the files that
16 * appear in a dirstate or manifest.
17 */
18 typedef struct {
19 PyObject_HEAD
20 PyObject *dict;
21 } dirsObject;
22
23 static inline Py_ssize_t _finddir(PyObject *path, Py_ssize_t pos)
24 {
25 const char *s = PyString_AS_STRING(path);
26
27 while (pos != -1) {
28 if (s[pos] == '/')
29 break;
30 pos -= 1;
31 }
32
33 return pos;
34 }
35
36 static int _addpath(PyObject *dirs, PyObject *path)
37 {
38 Py_ssize_t pos = PyString_GET_SIZE(path);
39 PyObject *newval = NULL, *key = NULL;
40 int ret = -1;
41
42 while ((pos = _finddir(path, pos - 1)) != -1) {
43 PyObject *val;
44 long v = 0;
45
46 key = PyString_FromStringAndSize(PyString_AS_STRING(path), pos);
47
48 if (key == NULL)
49 goto bail;
50
51 val = PyDict_GetItem(dirs, key);
52 if (val != NULL)
53 v = PyInt_AS_LONG(val);
54
55 newval = PyInt_FromLong(v + 1);
56
57 if (newval == NULL)
58 goto bail;
59
60 ret = PyDict_SetItem(dirs, key, newval);
61 if (ret == -1)
62 goto bail;
63 Py_CLEAR(key);
64 Py_CLEAR(newval);
65 }
66 ret = 0;
67
68 bail:
69 Py_XDECREF(key);
70 Py_XDECREF(newval);
71
72 return ret;
73 }
74
75 static int _delpath(PyObject *dirs, PyObject *path)
76 {
77 Py_ssize_t pos = PyString_GET_SIZE(path);
78 PyObject *newval = NULL, *key = NULL;
79 int ret = -1;
80
81 while ((pos = _finddir(path, pos - 1)) != -1) {
82 PyObject *val;
83 long v;
84
85 key = PyString_FromStringAndSize(PyString_AS_STRING(path), pos);
86
87 if (key == NULL)
88 goto bail;
89
90 val = PyDict_GetItem(dirs, key);
91 if (val == NULL) {
92 PyErr_SetString(PyExc_ValueError,
93 "expected a value, found none");
94 goto bail;
95 }
96 v = PyInt_AS_LONG(val);
97
98 if (v <= 1) {
99 if (PyDict_DelItem(dirs, key) == -1)
100 goto bail;
101 continue;
102 }
103 newval = PyInt_FromLong(v - 1);
104
105 if (newval == NULL)
106 goto bail;
107
108 ret = PyDict_SetItem(dirs, key, newval);
109 if (ret == -1)
110 goto bail;
111 Py_CLEAR(key);
112 Py_CLEAR(newval);
113 }
114 ret = 0;
115
116 bail:
117 Py_XDECREF(key);
118 Py_XDECREF(newval);
119
120 return ret;
121 }
122
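The two helpers above maintain the multiset described at the top of the file: every proper directory prefix of a path becomes a key whose value counts the tracked files beneath it. A rough pure-Python sketch of the same bookkeeping (illustrative only, not Mercurial's actual pure-Python fallback):

    def _finddirs(path):
        # yields 'a/b', then 'a' for 'a/b/c'; top-level names yield nothing
        pos = path.rfind('/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind('/', 0, pos)

    def addpath(dirs, path):
        for d in _finddirs(path):
            dirs[d] = dirs.get(d, 0) + 1

    def delpath(dirs, path):
        for d in _finddirs(path):
            if dirs[d] <= 1:
                del dirs[d]
            else:
                dirs[d] -= 1

    dirs = {}
    addpath(dirs, 'a/b/c')
    addpath(dirs, 'a/d')
    # dirs == {'a/b': 1, 'a': 2}
    delpath(dirs, 'a/b/c')
    # dirs == {'a': 1}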
123 static int dirs_fromdict(PyObject *dirs, PyObject *source, char skipchar)
124 {
125 PyObject *key, *value;
126 Py_ssize_t pos = 0;
127
128 while (PyDict_Next(source, &pos, &key, &value)) {
129 if (!PyString_Check(key)) {
130 PyErr_SetString(PyExc_TypeError, "expected string key");
131 return -1;
132 }
133 if (skipchar) {
134 PyObject *st;
135
136 if (!PyTuple_Check(value) ||
137 PyTuple_GET_SIZE(value) == 0) {
138 PyErr_SetString(PyExc_TypeError,
139 "expected non-empty tuple");
140 return -1;
141 }
142
143 st = PyTuple_GET_ITEM(value, 0);
144
145 if (!PyString_Check(st) || PyString_GET_SIZE(st) == 0) {
146 PyErr_SetString(PyExc_TypeError,
147 "expected non-empty string "
148 "at tuple index 0");
149 return -1;
150 }
151
152 if (PyString_AS_STRING(st)[0] == skipchar)
153 continue;
154 }
155
156 if (_addpath(dirs, key) == -1)
157 return -1;
158 }
159
160 return 0;
161 }
162
163 static int dirs_fromiter(PyObject *dirs, PyObject *source)
164 {
165 PyObject *iter, *item = NULL;
166 int ret;
167
168 iter = PyObject_GetIter(source);
169 if (iter == NULL)
170 return -1;
171
172 while ((item = PyIter_Next(iter)) != NULL) {
173 if (!PyString_Check(item)) {
174 PyErr_SetString(PyExc_TypeError, "expected string");
175 break;
176 }
177
178 if (_addpath(dirs, item) == -1)
179 break;
180 Py_CLEAR(item);
181 }
182
183 ret = PyErr_Occurred() ? -1 : 0;
184 Py_XDECREF(item);
185 return ret;
186 }
187
188 /*
189 * Calculate a refcounted set of directory names for the files in a
190 * dirstate.
191 */
192 static int dirs_init(dirsObject *self, PyObject *args)
193 {
194 PyObject *dirs = NULL, *source = NULL;
195 char skipchar = 0;
196 int ret = -1;
197
198 self->dict = NULL;
199
200 if (!PyArg_ParseTuple(args, "|Oc:__init__", &source, &skipchar))
201 return -1;
202
203 dirs = PyDict_New();
204
205 if (dirs == NULL)
206 return -1;
207
208 if (source == NULL)
209 ret = 0;
210 else if (PyDict_Check(source))
211 ret = dirs_fromdict(dirs, source, skipchar);
212 else if (skipchar)
213 PyErr_SetString(PyExc_ValueError,
214 "skip character is only supported "
215 "with a dict source");
216 else
217 ret = dirs_fromiter(dirs, source);
218
219 if (ret == -1)
220 Py_XDECREF(dirs);
221 else
222 self->dict = dirs;
223
224 return ret;
225 }
226
227 PyObject *dirs_addpath(dirsObject *self, PyObject *args)
228 {
229 PyObject *path;
230
231 if (!PyArg_ParseTuple(args, "O!:addpath", &PyString_Type, &path))
232 return NULL;
233
234 if (_addpath(self->dict, path) == -1)
235 return NULL;
236
237 Py_RETURN_NONE;
238 }
239
240 static PyObject *dirs_delpath(dirsObject *self, PyObject *args)
241 {
242 PyObject *path;
243
244 if (!PyArg_ParseTuple(args, "O!:delpath", &PyString_Type, &path))
245 return NULL;
246
247 if (_delpath(self->dict, path) == -1)
248 return NULL;
249
250 Py_RETURN_NONE;
251 }
252
253 static int dirs_contains(dirsObject *self, PyObject *value)
254 {
255 return PyString_Check(value) ? PyDict_Contains(self->dict, value) : 0;
256 }
257
258 static void dirs_dealloc(dirsObject *self)
259 {
260 Py_XDECREF(self->dict);
261 PyObject_Del(self);
262 }
263
264 static PyObject *dirs_iter(dirsObject *self)
265 {
266 return PyObject_GetIter(self->dict);
267 }
268
269 static PySequenceMethods dirs_sequence_methods;
270
271 static PyMethodDef dirs_methods[] = {
272 {"addpath", (PyCFunction)dirs_addpath, METH_VARARGS, "add a path"},
273 {"delpath", (PyCFunction)dirs_delpath, METH_VARARGS, "remove a path"},
274 {NULL} /* Sentinel */
275 };
276
277 static PyTypeObject dirsType = { PyObject_HEAD_INIT(NULL) };
278
279 void dirs_module_init(PyObject *mod)
280 {
281 dirs_sequence_methods.sq_contains = (objobjproc)dirs_contains;
282 dirsType.tp_name = "parsers.dirs";
283 dirsType.tp_new = PyType_GenericNew;
284 dirsType.tp_basicsize = sizeof(dirsObject);
285 dirsType.tp_dealloc = (destructor)dirs_dealloc;
286 dirsType.tp_as_sequence = &dirs_sequence_methods;
287 dirsType.tp_flags = Py_TPFLAGS_DEFAULT;
288 dirsType.tp_doc = "dirs";
289 dirsType.tp_iter = (getiterfunc)dirs_iter;
290 dirsType.tp_methods = dirs_methods;
291 dirsType.tp_init = (initproc)dirs_init;
292
293 if (PyType_Ready(&dirsType) < 0)
294 return;
295 Py_INCREF(&dirsType);
296
297 PyModule_AddObject(mod, "dirs", (PyObject *)&dirsType);
298 }
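Once dirs_module_init has run, the type above is exposed as parsers.dirs. Based on the methods registered here, a usage sketch from Python (assumes the C extension was built with this file; the dirstate-style tuples in the dict form are simplified placeholders):

    from mercurial import parsers

    d = parsers.dirs(['a/b/c', 'a/d'])   # build from an iterable of paths
    'a' in d                             # True  (dirs_contains)
    'a/b' in d                           # True
    'x' in d                             # False
    sorted(d)                            # ['a', 'a/b']  (dirs_iter)

    d.addpath('a/e/f')
    d.delpath('a/b/c')                   # last file under a/b, so 'a/b' is dropped

    # with a dirstate-style dict, entries whose state byte equals the skip
    # character (here 'r', i.e. removed) are ignored:
    dmap = {'a/b': ('n', 0, 0, 0), 'c/d': ('r', 0, 0, 0)}
    d2 = parsers.dirs(dmap, 'r')
    'c' in d2                            # False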
@@ -1,1569 +1,1573 @@
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <ctype.h>
12 12 #include <stddef.h>
13 13 #include <string.h>
14 14
15 15 #include "util.h"
16 16
17 17 static inline int hexdigit(const char *p, Py_ssize_t off)
18 18 {
19 19 char c = p[off];
20 20
21 21 if (c >= '0' && c <= '9')
22 22 return c - '0';
23 23 if (c >= 'a' && c <= 'f')
24 24 return c - 'a' + 10;
25 25 if (c >= 'A' && c <= 'F')
26 26 return c - 'A' + 10;
27 27
28 28 PyErr_SetString(PyExc_ValueError, "input contains non-hex character");
29 29 return 0;
30 30 }
31 31
32 32 /*
33 33 * Turn a hex-encoded string into binary.
34 34 */
35 35 static PyObject *unhexlify(const char *str, int len)
36 36 {
37 37 PyObject *ret;
38 38 char *d;
39 39 int i;
40 40
41 41 ret = PyBytes_FromStringAndSize(NULL, len / 2);
42 42
43 43 if (!ret)
44 44 return NULL;
45 45
46 46 d = PyBytes_AsString(ret);
47 47
48 48 for (i = 0; i < len;) {
49 49 int hi = hexdigit(str, i++);
50 50 int lo = hexdigit(str, i++);
51 51 *d++ = (hi << 4) | lo;
52 52 }
53 53
54 54 return ret;
55 55 }
56 56
57 57 /*
58 58 * This code assumes that a manifest is stitched together with newline
59 59 * ('\n') characters.
60 60 */
61 61 static PyObject *parse_manifest(PyObject *self, PyObject *args)
62 62 {
63 63 PyObject *mfdict, *fdict;
64 64 char *str, *cur, *start, *zero;
65 65 int len;
66 66
67 67 if (!PyArg_ParseTuple(args, "O!O!s#:parse_manifest",
68 68 &PyDict_Type, &mfdict,
69 69 &PyDict_Type, &fdict,
70 70 &str, &len))
71 71 goto quit;
72 72
73 73 for (start = cur = str, zero = NULL; cur < str + len; cur++) {
74 74 PyObject *file = NULL, *node = NULL;
75 75 PyObject *flags = NULL;
76 76 ptrdiff_t nlen;
77 77
78 78 if (!*cur) {
79 79 zero = cur;
80 80 continue;
81 81 }
82 82 else if (*cur != '\n')
83 83 continue;
84 84
85 85 if (!zero) {
86 86 PyErr_SetString(PyExc_ValueError,
87 87 "manifest entry has no separator");
88 88 goto quit;
89 89 }
90 90
91 91 file = PyBytes_FromStringAndSize(start, zero - start);
92 92
93 93 if (!file)
94 94 goto bail;
95 95
96 96 nlen = cur - zero - 1;
97 97
98 98 node = unhexlify(zero + 1, nlen > 40 ? 40 : (int)nlen);
99 99 if (!node)
100 100 goto bail;
101 101
102 102 if (nlen > 40) {
103 103 flags = PyBytes_FromStringAndSize(zero + 41,
104 104 nlen - 40);
105 105 if (!flags)
106 106 goto bail;
107 107
108 108 if (PyDict_SetItem(fdict, file, flags) == -1)
109 109 goto bail;
110 110 }
111 111
112 112 if (PyDict_SetItem(mfdict, file, node) == -1)
113 113 goto bail;
114 114
115 115 start = cur + 1;
116 116 zero = NULL;
117 117
118 118 Py_XDECREF(flags);
119 119 Py_XDECREF(node);
120 120 Py_XDECREF(file);
121 121 continue;
122 122 bail:
123 123 Py_XDECREF(flags);
124 124 Py_XDECREF(node);
125 125 Py_XDECREF(file);
126 126 goto quit;
127 127 }
128 128
129 129 if (len > 0 && *(cur - 1) != '\n') {
130 130 PyErr_SetString(PyExc_ValueError,
131 131 "manifest contains trailing garbage");
132 132 goto quit;
133 133 }
134 134
135 135 Py_INCREF(Py_None);
136 136 return Py_None;
137 137 quit:
138 138 return NULL;
139 139 }
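Each entry handled above is a file name, a NUL separator, a 40-character hex node, optional flag characters, and a trailing newline. A Python sketch of splitting one such line (field handling only, no error checking):

    import binascii

    def parse_manifest_line(line):
        # line looks like: 'path/to/file\0<40 hex chars>[flags]'
        fname, rest = line.split('\0', 1)
        node = binascii.unhexlify(rest[:40])
        flags = rest[40:]                 # e.g. 'x' or 'l', usually empty
        return fname, node, flags

    data = 'foo/bar\0' + 'ab' * 20 + 'x\n'
    parse_manifest_line(data.splitlines()[0])
    # ('foo/bar', '\xab' * 20, 'x')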
140 140
141 141 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
142 142 {
143 143 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
144 144 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
145 145 char *str, *cur, *end, *cpos;
146 146 int state, mode, size, mtime;
147 147 unsigned int flen;
148 148 int len;
149 149
150 150 if (!PyArg_ParseTuple(args, "O!O!s#:parse_dirstate",
151 151 &PyDict_Type, &dmap,
152 152 &PyDict_Type, &cmap,
153 153 &str, &len))
154 154 goto quit;
155 155
156 156 /* read parents */
157 157 if (len < 40)
158 158 goto quit;
159 159
160 160 parents = Py_BuildValue("s#s#", str, 20, str + 20, 20);
161 161 if (!parents)
162 162 goto quit;
163 163
164 164 /* read filenames */
165 165 cur = str + 40;
166 166 end = str + len;
167 167
168 168 while (cur < end - 17) {
169 169 /* unpack header */
170 170 state = *cur;
171 171 mode = getbe32(cur + 1);
172 172 size = getbe32(cur + 5);
173 173 mtime = getbe32(cur + 9);
174 174 flen = getbe32(cur + 13);
175 175 cur += 17;
176 176 if (cur + flen > end || cur + flen < cur) {
177 177 PyErr_SetString(PyExc_ValueError, "overflow in dirstate");
178 178 goto quit;
179 179 }
180 180
181 181 entry = Py_BuildValue("ciii", state, mode, size, mtime);
182 182 if (!entry)
183 183 goto quit;
184 184 PyObject_GC_UnTrack(entry); /* don't waste time with this */
185 185
186 186 cpos = memchr(cur, 0, flen);
187 187 if (cpos) {
188 188 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
189 189 cname = PyBytes_FromStringAndSize(cpos + 1,
190 190 flen - (cpos - cur) - 1);
191 191 if (!fname || !cname ||
192 192 PyDict_SetItem(cmap, fname, cname) == -1 ||
193 193 PyDict_SetItem(dmap, fname, entry) == -1)
194 194 goto quit;
195 195 Py_DECREF(cname);
196 196 } else {
197 197 fname = PyBytes_FromStringAndSize(cur, flen);
198 198 if (!fname ||
199 199 PyDict_SetItem(dmap, fname, entry) == -1)
200 200 goto quit;
201 201 }
202 202 cur += flen;
203 203 Py_DECREF(fname);
204 204 Py_DECREF(entry);
205 205 fname = cname = entry = NULL;
206 206 }
207 207
208 208 ret = parents;
209 209 Py_INCREF(ret);
210 210 quit:
211 211 Py_XDECREF(fname);
212 212 Py_XDECREF(cname);
213 213 Py_XDECREF(entry);
214 214 Py_XDECREF(parents);
215 215 return ret;
216 216 }
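The on-disk dirstate read above is two 20-byte parent hashes followed by records, each with a fixed 17-byte header (state byte, mode, size, mtime, name length, all big-endian) and then the file name, optionally '\0' plus a copy source. A sketch of unpacking one record in Python:

    import struct

    def parse_dirstate_entry(data, pos):
        # 17-byte header, matching the getbe32 reads above
        state, mode, size, mtime, flen = struct.unpack_from('>ciiiI', data, pos)
        pos += 17
        name = data[pos:pos + flen]
        pos += flen
        if '\0' in name:
            name, copysource = name.split('\0', 1)
        else:
            copysource = None
        return (state, mode, size, mtime, name, copysource), pos

    # the two parents occupy the first 40 bytes; records start at offset 40
    # entry, nextpos = parse_dirstate_entry(raw, 40)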
217 217
218 218 static inline int getintat(PyObject *tuple, int off, uint32_t *v)
219 219 {
220 220 PyObject *o = PyTuple_GET_ITEM(tuple, off);
221 221 long val;
222 222
223 223 if (PyInt_Check(o))
224 224 val = PyInt_AS_LONG(o);
225 225 else if (PyLong_Check(o)) {
226 226 val = PyLong_AsLong(o);
227 227 if (val == -1 && PyErr_Occurred())
228 228 return -1;
229 229 } else {
230 230 PyErr_SetString(PyExc_TypeError, "expected an int or long");
231 231 return -1;
232 232 }
233 233 if (LONG_MAX > INT_MAX && (val > INT_MAX || val < INT_MIN)) {
234 234 PyErr_SetString(PyExc_OverflowError,
235 235 "Python value too large to convert to uint32_t");
236 236 return -1;
237 237 }
238 238 *v = (uint32_t)val;
239 239 return 0;
240 240 }
241 241
242 242 static PyObject *dirstate_unset;
243 243
244 244 /*
245 245 * Efficiently pack a dirstate object into its on-disk format.
246 246 */
247 247 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
248 248 {
249 249 PyObject *packobj = NULL;
250 250 PyObject *map, *copymap, *pl;
251 251 Py_ssize_t nbytes, pos, l;
252 252 PyObject *k, *v, *pn;
253 253 char *p, *s;
254 254 double now;
255 255
256 256 if (!PyArg_ParseTuple(args, "O!O!Od:pack_dirstate",
257 257 &PyDict_Type, &map, &PyDict_Type, &copymap,
258 258 &pl, &now))
259 259 return NULL;
260 260
261 261 if (!PySequence_Check(pl) || PySequence_Size(pl) != 2) {
262 262 PyErr_SetString(PyExc_TypeError, "expected 2-element sequence");
263 263 return NULL;
264 264 }
265 265
266 266 /* Figure out how much we need to allocate. */
267 267 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
268 268 PyObject *c;
269 269 if (!PyString_Check(k)) {
270 270 PyErr_SetString(PyExc_TypeError, "expected string key");
271 271 goto bail;
272 272 }
273 273 nbytes += PyString_GET_SIZE(k) + 17;
274 274 c = PyDict_GetItem(copymap, k);
275 275 if (c) {
276 276 if (!PyString_Check(c)) {
277 277 PyErr_SetString(PyExc_TypeError,
278 278 "expected string key");
279 279 goto bail;
280 280 }
281 281 nbytes += PyString_GET_SIZE(c) + 1;
282 282 }
283 283 }
284 284
285 285 packobj = PyString_FromStringAndSize(NULL, nbytes);
286 286 if (packobj == NULL)
287 287 goto bail;
288 288
289 289 p = PyString_AS_STRING(packobj);
290 290
291 291 pn = PySequence_ITEM(pl, 0);
292 292 if (PyString_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
293 293 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
294 294 goto bail;
295 295 }
296 296 memcpy(p, s, l);
297 297 p += 20;
298 298 pn = PySequence_ITEM(pl, 1);
299 299 if (PyString_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
300 300 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
301 301 goto bail;
302 302 }
303 303 memcpy(p, s, l);
304 304 p += 20;
305 305
306 306 for (pos = 0; PyDict_Next(map, &pos, &k, &v); ) {
307 307 uint32_t mode, size, mtime;
308 308 Py_ssize_t len, l;
309 309 PyObject *o;
310 310 char *s, *t;
311 311
312 312 if (!PyTuple_Check(v) || PyTuple_GET_SIZE(v) != 4) {
313 313 PyErr_SetString(PyExc_TypeError, "expected a 4-tuple");
314 314 goto bail;
315 315 }
316 316 o = PyTuple_GET_ITEM(v, 0);
317 317 if (PyString_AsStringAndSize(o, &s, &l) == -1 || l != 1) {
318 318 PyErr_SetString(PyExc_TypeError, "expected one byte");
319 319 goto bail;
320 320 }
321 321 *p++ = *s;
322 322 if (getintat(v, 1, &mode) == -1)
323 323 goto bail;
324 324 if (getintat(v, 2, &size) == -1)
325 325 goto bail;
326 326 if (getintat(v, 3, &mtime) == -1)
327 327 goto bail;
328 328 if (*s == 'n' && mtime == (uint32_t)now) {
329 329 /* See pure/parsers.py:pack_dirstate for why we do
330 330 * this. */
331 331 if (PyDict_SetItem(map, k, dirstate_unset) == -1)
332 332 goto bail;
333 333 mode = 0, size = -1, mtime = -1;
334 334 }
335 335 putbe32(mode, p);
336 336 putbe32(size, p + 4);
337 337 putbe32(mtime, p + 8);
338 338 t = p + 12;
339 339 p += 16;
340 340 len = PyString_GET_SIZE(k);
341 341 memcpy(p, PyString_AS_STRING(k), len);
342 342 p += len;
343 343 o = PyDict_GetItem(copymap, k);
344 344 if (o) {
345 345 *p++ = '\0';
346 346 l = PyString_GET_SIZE(o);
347 347 memcpy(p, PyString_AS_STRING(o), l);
348 348 p += l;
349 349 len += l + 1;
350 350 }
351 351 putbe32((uint32_t)len, t);
352 352 }
353 353
354 354 pos = p - PyString_AS_STRING(packobj);
355 355 if (pos != nbytes) {
356 356 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
357 357 (long)pos, (long)nbytes);
358 358 goto bail;
359 359 }
360 360
361 361 return packobj;
362 362 bail:
363 363 Py_XDECREF(packobj);
364 364 return NULL;
365 365 }
366 366
367 367 /*
368 368 * A base-16 trie for fast node->rev mapping.
369 369 *
370 370 * Positive value is index of the next node in the trie
371 371 * Negative value is a leaf: -(rev + 1)
372 372 * Zero is empty
373 373 */
374 374 typedef struct {
375 375 int children[16];
376 376 } nodetree;
377 377
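The encoding above packs the whole trie into a flat array of 16-way nodes: a child slot either points at another node (positive index), names a revision directly (negative value, -(rev + 1)), or is empty (zero). In Python terms, the walk that nt_find performs for a binary node id looks roughly like this (prefix verification and ambiguity handling omitted):

    def nt_level(node, level):
        # extract the level-th nybble of a binary node id
        v = ord(node[level >> 1])
        if not (level & 1):
            v >>= 4
        return v & 0xf

    def trie_lookup(nt, node):
        off = 0
        for level in range(40):
            v = nt[off][nt_level(node, level)]
            if v == 0:
                return None         # empty slot: not present
            if v < 0:
                return -v - 1       # leaf: the revision number
            off = v                 # internal node: descend
        return None                 # 40 nybbles exhausted: ambiguous in the real code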
378 378 /*
379 379 * This class has two behaviours.
380 380 *
381 381 * When used in a list-like way (with integer keys), we decode an
382 382 * entry in a RevlogNG index file on demand. Our last entry is a
383 383 * sentinel, always a nullid. We have limited support for
384 384 * integer-keyed insert and delete, only at elements right before the
385 385 * sentinel.
386 386 *
387 387 * With string keys, we lazily perform a reverse mapping from node to
388 388 * rev, using a base-16 trie.
389 389 */
390 390 typedef struct {
391 391 PyObject_HEAD
392 392 /* Type-specific fields go here. */
393 393 PyObject *data; /* raw bytes of index */
394 394 PyObject **cache; /* cached tuples */
395 395 const char **offsets; /* populated on demand */
396 396 Py_ssize_t raw_length; /* original number of elements */
397 397 Py_ssize_t length; /* current number of elements */
398 398 PyObject *added; /* populated on demand */
399 399 PyObject *headrevs; /* cache, invalidated on changes */
400 400 nodetree *nt; /* base-16 trie */
401 401 int ntlength; /* # nodes in use */
402 402 int ntcapacity; /* # nodes allocated */
403 403 int ntdepth; /* maximum depth of tree */
404 404 int ntsplits; /* # splits performed */
405 405 int ntrev; /* last rev scanned */
406 406 int ntlookups; /* # lookups */
407 407 int ntmisses; /* # lookups that miss the cache */
408 408 int inlined;
409 409 } indexObject;
410 410
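In practice the comment above means one object answers both integer and node-id queries. A usage sketch (data, inline, some_node and entry_tuple are placeholders; the entry layout matches index_get below):

    index, cache = parsers.parse_index2(data, inline)  # defined later in this file

    len(index)           # number of revisions plus the trailing nullid sentinel
    index[0]             # (offset_flags, comp_len, uncomp_len, base_rev,
                         #  link_rev, parent_1, parent_2, node)
    index[some_node]     # 20-byte node -> rev, building the trie lazily
    some_node in index   # membership test by node or by rev number
    index.partialmatch('1f0e')     # unique hex prefix -> full node, or None
    index.insert(-1, entry_tuple)  # append just before the sentinel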
411 411 static Py_ssize_t index_length(const indexObject *self)
412 412 {
413 413 if (self->added == NULL)
414 414 return self->length;
415 415 return self->length + PyList_GET_SIZE(self->added);
416 416 }
417 417
418 418 static PyObject *nullentry;
419 419 static const char nullid[20];
420 420
421 421 static long inline_scan(indexObject *self, const char **offsets);
422 422
423 423 #if LONG_MAX == 0x7fffffffL
424 424 static char *tuple_format = "Kiiiiiis#";
425 425 #else
426 426 static char *tuple_format = "kiiiiiis#";
427 427 #endif
428 428
429 429 /* A RevlogNG v1 index entry is 64 bytes long. */
430 430 static const long v1_hdrsize = 64;
431 431
432 432 /*
433 433 * Return a pointer to the beginning of a RevlogNG record.
434 434 */
435 435 static const char *index_deref(indexObject *self, Py_ssize_t pos)
436 436 {
437 437 if (self->inlined && pos > 0) {
438 438 if (self->offsets == NULL) {
439 439 self->offsets = malloc(self->raw_length *
440 440 sizeof(*self->offsets));
441 441 if (self->offsets == NULL)
442 442 return (const char *)PyErr_NoMemory();
443 443 inline_scan(self, self->offsets);
444 444 }
445 445 return self->offsets[pos];
446 446 }
447 447
448 448 return PyString_AS_STRING(self->data) + pos * v1_hdrsize;
449 449 }
450 450
451 451 /*
452 452 * RevlogNG format (all in big endian, data may be inlined):
453 453 * 6 bytes: offset
454 454 * 2 bytes: flags
455 455 * 4 bytes: compressed length
456 456 * 4 bytes: uncompressed length
457 457 * 4 bytes: base revision
458 458 * 4 bytes: link revision
459 459 * 4 bytes: parent 1 revision
460 460 * 4 bytes: parent 2 revision
461 461 * 32 bytes: nodeid (only 20 bytes used)
462 462 */
463 463 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
464 464 {
465 465 uint64_t offset_flags;
466 466 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
467 467 const char *c_node_id;
468 468 const char *data;
469 469 Py_ssize_t length = index_length(self);
470 470 PyObject *entry;
471 471
472 472 if (pos < 0)
473 473 pos += length;
474 474
475 475 if (pos < 0 || pos >= length) {
476 476 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
477 477 return NULL;
478 478 }
479 479
480 480 if (pos == length - 1) {
481 481 Py_INCREF(nullentry);
482 482 return nullentry;
483 483 }
484 484
485 485 if (pos >= self->length - 1) {
486 486 PyObject *obj;
487 487 obj = PyList_GET_ITEM(self->added, pos - self->length + 1);
488 488 Py_INCREF(obj);
489 489 return obj;
490 490 }
491 491
492 492 if (self->cache) {
493 493 if (self->cache[pos]) {
494 494 Py_INCREF(self->cache[pos]);
495 495 return self->cache[pos];
496 496 }
497 497 } else {
498 498 self->cache = calloc(self->raw_length, sizeof(PyObject *));
499 499 if (self->cache == NULL)
500 500 return PyErr_NoMemory();
501 501 }
502 502
503 503 data = index_deref(self, pos);
504 504 if (data == NULL)
505 505 return NULL;
506 506
507 507 offset_flags = getbe32(data + 4);
508 508 if (pos == 0) /* mask out version number for the first entry */
509 509 offset_flags &= 0xFFFF;
510 510 else {
511 511 uint32_t offset_high = getbe32(data);
512 512 offset_flags |= ((uint64_t)offset_high) << 32;
513 513 }
514 514
515 515 comp_len = getbe32(data + 8);
516 516 uncomp_len = getbe32(data + 12);
517 517 base_rev = getbe32(data + 16);
518 518 link_rev = getbe32(data + 20);
519 519 parent_1 = getbe32(data + 24);
520 520 parent_2 = getbe32(data + 28);
521 521 c_node_id = data + 32;
522 522
523 523 entry = Py_BuildValue(tuple_format, offset_flags, comp_len,
524 524 uncomp_len, base_rev, link_rev,
525 525 parent_1, parent_2, c_node_id, 20);
526 526
527 527 if (entry)
528 528 PyObject_GC_UnTrack(entry);
529 529
530 530 self->cache[pos] = entry;
531 531 Py_INCREF(entry);
532 532
533 533 return entry;
534 534 }
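The fixed 64-byte layout documented just before index_get maps directly onto a struct format string. A Python sketch of decoding one uninlined entry (the version bits of the first entry are masked off, as index_get does):

    import struct

    # offset+flags (8 bytes), comp_len, uncomp_len, base_rev, link_rev,
    # parent_1, parent_2 (4 bytes each), 20-byte node, 12 bytes of padding
    v1_entry = struct.Struct('>Qiiiiii20s12x')   # 64 bytes in total

    def decode_entry(data, rev):
        fields = v1_entry.unpack_from(data, rev * 64)
        if rev == 0:
            # the first entry stores the revlog version in the high bits
            fields = (fields[0] & 0xffff,) + fields[1:]
        return fields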
535 535
536 536 /*
537 537 * Return the 20-byte SHA of the node corresponding to the given rev.
538 538 */
539 539 static const char *index_node(indexObject *self, Py_ssize_t pos)
540 540 {
541 541 Py_ssize_t length = index_length(self);
542 542 const char *data;
543 543
544 544 if (pos == length - 1 || pos == INT_MAX)
545 545 return nullid;
546 546
547 547 if (pos >= length)
548 548 return NULL;
549 549
550 550 if (pos >= self->length - 1) {
551 551 PyObject *tuple, *str;
552 552 tuple = PyList_GET_ITEM(self->added, pos - self->length + 1);
553 553 str = PyTuple_GetItem(tuple, 7);
554 554 return str ? PyString_AS_STRING(str) : NULL;
555 555 }
556 556
557 557 data = index_deref(self, pos);
558 558 return data ? data + 32 : NULL;
559 559 }
560 560
561 561 static int nt_insert(indexObject *self, const char *node, int rev);
562 562
563 563 static int node_check(PyObject *obj, char **node, Py_ssize_t *nodelen)
564 564 {
565 565 if (PyString_AsStringAndSize(obj, node, nodelen) == -1)
566 566 return -1;
567 567 if (*nodelen == 20)
568 568 return 0;
569 569 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
570 570 return -1;
571 571 }
572 572
573 573 static PyObject *index_insert(indexObject *self, PyObject *args)
574 574 {
575 575 PyObject *obj;
576 576 char *node;
577 577 long offset;
578 578 Py_ssize_t len, nodelen;
579 579
580 580 if (!PyArg_ParseTuple(args, "lO", &offset, &obj))
581 581 return NULL;
582 582
583 583 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
584 584 PyErr_SetString(PyExc_TypeError, "8-tuple required");
585 585 return NULL;
586 586 }
587 587
588 588 if (node_check(PyTuple_GET_ITEM(obj, 7), &node, &nodelen) == -1)
589 589 return NULL;
590 590
591 591 len = index_length(self);
592 592
593 593 if (offset < 0)
594 594 offset += len;
595 595
596 596 if (offset != len - 1) {
597 597 PyErr_SetString(PyExc_IndexError,
598 598 "insert only supported at index -1");
599 599 return NULL;
600 600 }
601 601
602 602 if (offset > INT_MAX) {
603 603 PyErr_SetString(PyExc_ValueError,
604 604 "currently only 2**31 revs supported");
605 605 return NULL;
606 606 }
607 607
608 608 if (self->added == NULL) {
609 609 self->added = PyList_New(0);
610 610 if (self->added == NULL)
611 611 return NULL;
612 612 }
613 613
614 614 if (PyList_Append(self->added, obj) == -1)
615 615 return NULL;
616 616
617 617 if (self->nt)
618 618 nt_insert(self, node, (int)offset);
619 619
620 620 Py_CLEAR(self->headrevs);
621 621 Py_RETURN_NONE;
622 622 }
623 623
624 624 static void _index_clearcaches(indexObject *self)
625 625 {
626 626 if (self->cache) {
627 627 Py_ssize_t i;
628 628
629 629 for (i = 0; i < self->raw_length; i++)
630 630 Py_CLEAR(self->cache[i]);
631 631 free(self->cache);
632 632 self->cache = NULL;
633 633 }
634 634 if (self->offsets) {
635 635 free(self->offsets);
636 636 self->offsets = NULL;
637 637 }
638 638 if (self->nt) {
639 639 free(self->nt);
640 640 self->nt = NULL;
641 641 }
642 642 Py_CLEAR(self->headrevs);
643 643 }
644 644
645 645 static PyObject *index_clearcaches(indexObject *self)
646 646 {
647 647 _index_clearcaches(self);
648 648 self->ntlength = self->ntcapacity = 0;
649 649 self->ntdepth = self->ntsplits = 0;
650 650 self->ntrev = -1;
651 651 self->ntlookups = self->ntmisses = 0;
652 652 Py_RETURN_NONE;
653 653 }
654 654
655 655 static PyObject *index_stats(indexObject *self)
656 656 {
657 657 PyObject *obj = PyDict_New();
658 658
659 659 if (obj == NULL)
660 660 return NULL;
661 661
662 662 #define istat(__n, __d) \
663 663 if (PyDict_SetItemString(obj, __d, PyInt_FromSsize_t(self->__n)) == -1) \
664 664 goto bail;
665 665
666 666 if (self->added) {
667 667 Py_ssize_t len = PyList_GET_SIZE(self->added);
668 668 if (PyDict_SetItemString(obj, "index entries added",
669 669 PyInt_FromSsize_t(len)) == -1)
670 670 goto bail;
671 671 }
672 672
673 673 if (self->raw_length != self->length - 1)
674 674 istat(raw_length, "revs on disk");
675 675 istat(length, "revs in memory");
676 676 istat(ntcapacity, "node trie capacity");
677 677 istat(ntdepth, "node trie depth");
678 678 istat(ntlength, "node trie count");
679 679 istat(ntlookups, "node trie lookups");
680 680 istat(ntmisses, "node trie misses");
681 681 istat(ntrev, "node trie last rev scanned");
682 682 istat(ntsplits, "node trie splits");
683 683
684 684 #undef istat
685 685
686 686 return obj;
687 687
688 688 bail:
689 689 Py_XDECREF(obj);
690 690 return NULL;
691 691 }
692 692
693 693 /*
694 694 * When we cache a list, we want to be sure the caller can't mutate
695 695 * the cached copy.
696 696 */
697 697 static PyObject *list_copy(PyObject *list)
698 698 {
699 699 Py_ssize_t len = PyList_GET_SIZE(list);
700 700 PyObject *newlist = PyList_New(len);
701 701 Py_ssize_t i;
702 702
703 703 if (newlist == NULL)
704 704 return NULL;
705 705
706 706 for (i = 0; i < len; i++) {
707 707 PyObject *obj = PyList_GET_ITEM(list, i);
708 708 Py_INCREF(obj);
709 709 PyList_SET_ITEM(newlist, i, obj);
710 710 }
711 711
712 712 return newlist;
713 713 }
714 714
715 715 static PyObject *index_headrevs(indexObject *self)
716 716 {
717 717 Py_ssize_t i, len, addlen;
718 718 char *nothead = NULL;
719 719 PyObject *heads;
720 720
721 721 if (self->headrevs)
722 722 return list_copy(self->headrevs);
723 723
724 724 len = index_length(self) - 1;
725 725 heads = PyList_New(0);
726 726 if (heads == NULL)
727 727 goto bail;
728 728 if (len == 0) {
729 729 PyObject *nullid = PyInt_FromLong(-1);
730 730 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
731 731 Py_XDECREF(nullid);
732 732 goto bail;
733 733 }
734 734 goto done;
735 735 }
736 736
737 737 nothead = calloc(len, 1);
738 738 if (nothead == NULL)
739 739 goto bail;
740 740
741 741 for (i = 0; i < self->raw_length; i++) {
742 742 const char *data = index_deref(self, i);
743 743 int parent_1 = getbe32(data + 24);
744 744 int parent_2 = getbe32(data + 28);
745 745 if (parent_1 >= 0)
746 746 nothead[parent_1] = 1;
747 747 if (parent_2 >= 0)
748 748 nothead[parent_2] = 1;
749 749 }
750 750
751 751 addlen = self->added ? PyList_GET_SIZE(self->added) : 0;
752 752
753 753 for (i = 0; i < addlen; i++) {
754 754 PyObject *rev = PyList_GET_ITEM(self->added, i);
755 755 PyObject *p1 = PyTuple_GET_ITEM(rev, 5);
756 756 PyObject *p2 = PyTuple_GET_ITEM(rev, 6);
757 757 long parent_1, parent_2;
758 758
759 759 if (!PyInt_Check(p1) || !PyInt_Check(p2)) {
760 760 PyErr_SetString(PyExc_TypeError,
761 761 "revlog parents are invalid");
762 762 goto bail;
763 763 }
764 764 parent_1 = PyInt_AS_LONG(p1);
765 765 parent_2 = PyInt_AS_LONG(p2);
766 766 if (parent_1 >= 0)
767 767 nothead[parent_1] = 1;
768 768 if (parent_2 >= 0)
769 769 nothead[parent_2] = 1;
770 770 }
771 771
772 772 for (i = 0; i < len; i++) {
773 773 PyObject *head;
774 774
775 775 if (nothead[i])
776 776 continue;
777 777 head = PyInt_FromLong(i);
778 778 if (head == NULL || PyList_Append(heads, head) == -1) {
779 779 Py_XDECREF(head);
780 780 goto bail;
781 781 }
782 782 }
783 783
784 784 done:
785 785 self->headrevs = heads;
786 786 free(nothead);
787 787 return list_copy(self->headrevs);
788 788 bail:
789 789 Py_XDECREF(heads);
790 790 free(nothead);
791 791 return NULL;
792 792 }
793 793
794 794 static inline int nt_level(const char *node, Py_ssize_t level)
795 795 {
796 796 int v = node[level>>1];
797 797 if (!(level & 1))
798 798 v >>= 4;
799 799 return v & 0xf;
800 800 }
801 801
802 802 /*
803 803 * Return values:
804 804 *
805 805 * -4: match is ambiguous (multiple candidates)
806 806 * -2: not found
807 807 * rest: valid rev
808 808 */
809 809 static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
810 810 int hex)
811 811 {
812 812 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
813 813 int level, maxlevel, off;
814 814
815 815 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
816 816 return -1;
817 817
818 818 if (self->nt == NULL)
819 819 return -2;
820 820
821 821 if (hex)
822 822 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
823 823 else
824 824 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
825 825
826 826 for (level = off = 0; level < maxlevel; level++) {
827 827 int k = getnybble(node, level);
828 828 nodetree *n = &self->nt[off];
829 829 int v = n->children[k];
830 830
831 831 if (v < 0) {
832 832 const char *n;
833 833 Py_ssize_t i;
834 834
835 835 v = -v - 1;
836 836 n = index_node(self, v);
837 837 if (n == NULL)
838 838 return -2;
839 839 for (i = level; i < maxlevel; i++)
840 840 if (getnybble(node, i) != nt_level(n, i))
841 841 return -2;
842 842 return v;
843 843 }
844 844 if (v == 0)
845 845 return -2;
846 846 off = v;
847 847 }
848 848 /* multiple matches against an ambiguous prefix */
849 849 return -4;
850 850 }
851 851
852 852 static int nt_new(indexObject *self)
853 853 {
854 854 if (self->ntlength == self->ntcapacity) {
855 855 self->ntcapacity *= 2;
856 856 self->nt = realloc(self->nt,
857 857 self->ntcapacity * sizeof(nodetree));
858 858 if (self->nt == NULL) {
859 859 PyErr_SetString(PyExc_MemoryError, "out of memory");
860 860 return -1;
861 861 }
862 862 memset(&self->nt[self->ntlength], 0,
863 863 sizeof(nodetree) * (self->ntcapacity - self->ntlength));
864 864 }
865 865 return self->ntlength++;
866 866 }
867 867
868 868 static int nt_insert(indexObject *self, const char *node, int rev)
869 869 {
870 870 int level = 0;
871 871 int off = 0;
872 872
873 873 while (level < 40) {
874 874 int k = nt_level(node, level);
875 875 nodetree *n;
876 876 int v;
877 877
878 878 n = &self->nt[off];
879 879 v = n->children[k];
880 880
881 881 if (v == 0) {
882 882 n->children[k] = -rev - 1;
883 883 return 0;
884 884 }
885 885 if (v < 0) {
886 886 const char *oldnode = index_node(self, -v - 1);
887 887 int noff;
888 888
889 889 if (!oldnode || !memcmp(oldnode, node, 20)) {
890 890 n->children[k] = -rev - 1;
891 891 return 0;
892 892 }
893 893 noff = nt_new(self);
894 894 if (noff == -1)
895 895 return -1;
896 896 /* self->nt may have been changed by realloc */
897 897 self->nt[off].children[k] = noff;
898 898 off = noff;
899 899 n = &self->nt[off];
900 900 n->children[nt_level(oldnode, ++level)] = v;
901 901 if (level > self->ntdepth)
902 902 self->ntdepth = level;
903 903 self->ntsplits += 1;
904 904 } else {
905 905 level += 1;
906 906 off = v;
907 907 }
908 908 }
909 909
910 910 return -1;
911 911 }
912 912
913 913 static int nt_init(indexObject *self)
914 914 {
915 915 if (self->nt == NULL) {
916 916 self->ntcapacity = self->raw_length < 4
917 917 ? 4 : self->raw_length / 2;
918 918 self->nt = calloc(self->ntcapacity, sizeof(nodetree));
919 919 if (self->nt == NULL) {
920 920 PyErr_NoMemory();
921 921 return -1;
922 922 }
923 923 self->ntlength = 1;
924 924 self->ntrev = (int)index_length(self) - 1;
925 925 self->ntlookups = 1;
926 926 self->ntmisses = 0;
927 927 if (nt_insert(self, nullid, INT_MAX) == -1)
928 928 return -1;
929 929 }
930 930 return 0;
931 931 }
932 932
933 933 /*
934 934 * Return values:
935 935 *
936 936 * -3: error (exception set)
937 937 * -2: not found (no exception set)
938 938 * rest: valid rev
939 939 */
940 940 static int index_find_node(indexObject *self,
941 941 const char *node, Py_ssize_t nodelen)
942 942 {
943 943 int rev;
944 944
945 945 self->ntlookups++;
946 946 rev = nt_find(self, node, nodelen, 0);
947 947 if (rev >= -1)
948 948 return rev;
949 949
950 950 if (nt_init(self) == -1)
951 951 return -3;
952 952
953 953 /*
954 954 * For the first handful of lookups, we scan the entire index,
955 955 * and cache only the matching nodes. This optimizes for cases
956 956 * like "hg tip", where only a few nodes are accessed.
957 957 *
958 958 * After that, we cache every node we visit, using a single
959 959 * scan amortized over multiple lookups. This gives the best
960 960 * bulk performance, e.g. for "hg log".
961 961 */
962 962 if (self->ntmisses++ < 4) {
963 963 for (rev = self->ntrev - 1; rev >= 0; rev--) {
964 964 const char *n = index_node(self, rev);
965 965 if (n == NULL)
966 966 return -2;
967 967 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
968 968 if (nt_insert(self, n, rev) == -1)
969 969 return -3;
970 970 break;
971 971 }
972 972 }
973 973 } else {
974 974 for (rev = self->ntrev - 1; rev >= 0; rev--) {
975 975 const char *n = index_node(self, rev);
976 976 if (n == NULL) {
977 977 self->ntrev = rev + 1;
978 978 return -2;
979 979 }
980 980 if (nt_insert(self, n, rev) == -1) {
981 981 self->ntrev = rev + 1;
982 982 return -3;
983 983 }
984 984 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
985 985 break;
986 986 }
987 987 }
988 988 self->ntrev = rev;
989 989 }
990 990
991 991 if (rev >= 0)
992 992 return rev;
993 993 return -2;
994 994 }
995 995
996 996 static PyObject *raise_revlog_error(void)
997 997 {
998 998 static PyObject *errclass;
999 999 PyObject *mod = NULL, *errobj;
1000 1000
1001 1001 if (errclass == NULL) {
1002 1002 PyObject *dict;
1003 1003
1004 1004 mod = PyImport_ImportModule("mercurial.error");
1005 1005 if (mod == NULL)
1006 1006 goto classfail;
1007 1007
1008 1008 dict = PyModule_GetDict(mod);
1009 1009 if (dict == NULL)
1010 1010 goto classfail;
1011 1011
1012 1012 errclass = PyDict_GetItemString(dict, "RevlogError");
1013 1013 if (errclass == NULL) {
1014 1014 PyErr_SetString(PyExc_SystemError,
1015 1015 "could not find RevlogError");
1016 1016 goto classfail;
1017 1017 }
1018 1018 Py_INCREF(errclass);
1019 1019 }
1020 1020
1021 1021 errobj = PyObject_CallFunction(errclass, NULL);
1022 1022 if (errobj == NULL)
1023 1023 return NULL;
1024 1024 PyErr_SetObject(errclass, errobj);
1025 1025 return errobj;
1026 1026
1027 1027 classfail:
1028 1028 Py_XDECREF(mod);
1029 1029 return NULL;
1030 1030 }
1031 1031
1032 1032 static PyObject *index_getitem(indexObject *self, PyObject *value)
1033 1033 {
1034 1034 char *node;
1035 1035 Py_ssize_t nodelen;
1036 1036 int rev;
1037 1037
1038 1038 if (PyInt_Check(value))
1039 1039 return index_get(self, PyInt_AS_LONG(value));
1040 1040
1041 1041 if (node_check(value, &node, &nodelen) == -1)
1042 1042 return NULL;
1043 1043 rev = index_find_node(self, node, nodelen);
1044 1044 if (rev >= -1)
1045 1045 return PyInt_FromLong(rev);
1046 1046 if (rev == -2)
1047 1047 raise_revlog_error();
1048 1048 return NULL;
1049 1049 }
1050 1050
1051 1051 static int nt_partialmatch(indexObject *self, const char *node,
1052 1052 Py_ssize_t nodelen)
1053 1053 {
1054 1054 int rev;
1055 1055
1056 1056 if (nt_init(self) == -1)
1057 1057 return -3;
1058 1058
1059 1059 if (self->ntrev > 0) {
1060 1060 /* ensure that the radix tree is fully populated */
1061 1061 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1062 1062 const char *n = index_node(self, rev);
1063 1063 if (n == NULL)
1064 1064 return -2;
1065 1065 if (nt_insert(self, n, rev) == -1)
1066 1066 return -3;
1067 1067 }
1068 1068 self->ntrev = rev;
1069 1069 }
1070 1070
1071 1071 return nt_find(self, node, nodelen, 1);
1072 1072 }
1073 1073
1074 1074 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1075 1075 {
1076 1076 const char *fullnode;
1077 1077 int nodelen;
1078 1078 char *node;
1079 1079 int rev, i;
1080 1080
1081 1081 if (!PyArg_ParseTuple(args, "s#", &node, &nodelen))
1082 1082 return NULL;
1083 1083
1084 1084 if (nodelen < 4) {
1085 1085 PyErr_SetString(PyExc_ValueError, "key too short");
1086 1086 return NULL;
1087 1087 }
1088 1088
1089 1089 if (nodelen > 40) {
1090 1090 PyErr_SetString(PyExc_ValueError, "key too long");
1091 1091 return NULL;
1092 1092 }
1093 1093
1094 1094 for (i = 0; i < nodelen; i++)
1095 1095 hexdigit(node, i);
1096 1096 if (PyErr_Occurred()) {
1097 1097 /* input contains non-hex characters */
1098 1098 PyErr_Clear();
1099 1099 Py_RETURN_NONE;
1100 1100 }
1101 1101
1102 1102 rev = nt_partialmatch(self, node, nodelen);
1103 1103
1104 1104 switch (rev) {
1105 1105 case -4:
1106 1106 raise_revlog_error();
1107 1107 case -3:
1108 1108 return NULL;
1109 1109 case -2:
1110 1110 Py_RETURN_NONE;
1111 1111 case -1:
1112 1112 return PyString_FromStringAndSize(nullid, 20);
1113 1113 }
1114 1114
1115 1115 fullnode = index_node(self, rev);
1116 1116 if (fullnode == NULL) {
1117 1117 PyErr_Format(PyExc_IndexError,
1118 1118 "could not access rev %d", rev);
1119 1119 return NULL;
1120 1120 }
1121 1121 return PyString_FromStringAndSize(fullnode, 20);
1122 1122 }
1123 1123
1124 1124 static PyObject *index_m_get(indexObject *self, PyObject *args)
1125 1125 {
1126 1126 Py_ssize_t nodelen;
1127 1127 PyObject *val;
1128 1128 char *node;
1129 1129 int rev;
1130 1130
1131 1131 if (!PyArg_ParseTuple(args, "O", &val))
1132 1132 return NULL;
1133 1133 if (node_check(val, &node, &nodelen) == -1)
1134 1134 return NULL;
1135 1135 rev = index_find_node(self, node, nodelen);
1136 1136 if (rev == -3)
1137 1137 return NULL;
1138 1138 if (rev == -2)
1139 1139 Py_RETURN_NONE;
1140 1140 return PyInt_FromLong(rev);
1141 1141 }
1142 1142
1143 1143 static int index_contains(indexObject *self, PyObject *value)
1144 1144 {
1145 1145 char *node;
1146 1146 Py_ssize_t nodelen;
1147 1147
1148 1148 if (PyInt_Check(value)) {
1149 1149 long rev = PyInt_AS_LONG(value);
1150 1150 return rev >= -1 && rev < index_length(self);
1151 1151 }
1152 1152
1153 1153 if (node_check(value, &node, &nodelen) == -1)
1154 1154 return -1;
1155 1155
1156 1156 switch (index_find_node(self, node, nodelen)) {
1157 1157 case -3:
1158 1158 return -1;
1159 1159 case -2:
1160 1160 return 0;
1161 1161 default:
1162 1162 return 1;
1163 1163 }
1164 1164 }
1165 1165
1166 1166 /*
1167 1167 * Invalidate any trie entries introduced by added revs.
1168 1168 */
1169 1169 static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
1170 1170 {
1171 1171 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
1172 1172
1173 1173 for (i = start; i < len; i++) {
1174 1174 PyObject *tuple = PyList_GET_ITEM(self->added, i);
1175 1175 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
1176 1176
1177 1177 nt_insert(self, PyString_AS_STRING(node), -1);
1178 1178 }
1179 1179
1180 1180 if (start == 0)
1181 1181 Py_CLEAR(self->added);
1182 1182 }
1183 1183
1184 1184 /*
1185 1185 * Delete a numeric range of revs, which must lie at the end of the
1186 1186 * index; the sentinel nullid entry is excluded from deletion.
1187 1187 */
1188 1188 static int index_slice_del(indexObject *self, PyObject *item)
1189 1189 {
1190 1190 Py_ssize_t start, stop, step, slicelength;
1191 1191 Py_ssize_t length = index_length(self);
1192 1192 int ret = 0;
1193 1193
1194 1194 if (PySlice_GetIndicesEx((PySliceObject*)item, length,
1195 1195 &start, &stop, &step, &slicelength) < 0)
1196 1196 return -1;
1197 1197
1198 1198 if (slicelength <= 0)
1199 1199 return 0;
1200 1200
1201 1201 if ((step < 0 && start < stop) || (step > 0 && start > stop))
1202 1202 stop = start;
1203 1203
1204 1204 if (step < 0) {
1205 1205 stop = start + 1;
1206 1206 start = stop + step*(slicelength - 1) - 1;
1207 1207 step = -step;
1208 1208 }
1209 1209
1210 1210 if (step != 1) {
1211 1211 PyErr_SetString(PyExc_ValueError,
1212 1212 "revlog index delete requires step size of 1");
1213 1213 return -1;
1214 1214 }
1215 1215
1216 1216 if (stop != length - 1) {
1217 1217 PyErr_SetString(PyExc_IndexError,
1218 1218 "revlog index deletion indices are invalid");
1219 1219 return -1;
1220 1220 }
1221 1221
1222 1222 if (start < self->length - 1) {
1223 1223 if (self->nt) {
1224 1224 Py_ssize_t i;
1225 1225
1226 1226 for (i = start + 1; i < self->length - 1; i++) {
1227 1227 const char *node = index_node(self, i);
1228 1228
1229 1229 if (node)
1230 1230 nt_insert(self, node, -1);
1231 1231 }
1232 1232 if (self->added)
1233 1233 nt_invalidate_added(self, 0);
1234 1234 if (self->ntrev > start)
1235 1235 self->ntrev = (int)start;
1236 1236 }
1237 1237 self->length = start + 1;
1238 1238 if (start < self->raw_length) {
1239 1239 if (self->cache) {
1240 1240 Py_ssize_t i;
1241 1241 for (i = start; i < self->raw_length; i++)
1242 1242 Py_CLEAR(self->cache[i]);
1243 1243 }
1244 1244 self->raw_length = start;
1245 1245 }
1246 1246 goto done;
1247 1247 }
1248 1248
1249 1249 if (self->nt) {
1250 1250 nt_invalidate_added(self, start - self->length + 1);
1251 1251 if (self->ntrev > start)
1252 1252 self->ntrev = (int)start;
1253 1253 }
1254 1254 if (self->added)
1255 1255 ret = PyList_SetSlice(self->added, start - self->length + 1,
1256 1256 PyList_GET_SIZE(self->added), NULL);
1257 1257 done:
1258 1258 Py_CLEAR(self->headrevs);
1259 1259 return ret;
1260 1260 }
1261 1261
1262 1262 /*
1263 1263 * Supported ops:
1264 1264 *
1265 1265 * slice deletion
1266 1266 * string assignment (extend node->rev mapping)
1267 1267 * string deletion (shrink node->rev mapping)
1268 1268 */
1269 1269 static int index_assign_subscript(indexObject *self, PyObject *item,
1270 1270 PyObject *value)
1271 1271 {
1272 1272 char *node;
1273 1273 Py_ssize_t nodelen;
1274 1274 long rev;
1275 1275
1276 1276 if (PySlice_Check(item) && value == NULL)
1277 1277 return index_slice_del(self, item);
1278 1278
1279 1279 if (node_check(item, &node, &nodelen) == -1)
1280 1280 return -1;
1281 1281
1282 1282 if (value == NULL)
1283 1283 return self->nt ? nt_insert(self, node, -1) : 0;
1284 1284 rev = PyInt_AsLong(value);
1285 1285 if (rev > INT_MAX || rev < 0) {
1286 1286 if (!PyErr_Occurred())
1287 1287 PyErr_SetString(PyExc_ValueError, "rev out of range");
1288 1288 return -1;
1289 1289 }
1290 1290 return nt_insert(self, node, (int)rev);
1291 1291 }
1292 1292
1293 1293 /*
1294 1294 * Find all RevlogNG entries in an index that has inline data. Update
1295 1295 * the optional "offsets" table with those entries.
1296 1296 */
1297 1297 static long inline_scan(indexObject *self, const char **offsets)
1298 1298 {
1299 1299 const char *data = PyString_AS_STRING(self->data);
1300 1300 const char *end = data + PyString_GET_SIZE(self->data);
1301 1301 long incr = v1_hdrsize;
1302 1302 Py_ssize_t len = 0;
1303 1303
1304 1304 while (data + v1_hdrsize <= end) {
1305 1305 uint32_t comp_len;
1306 1306 const char *old_data;
1307 1307 /* 3rd element of header is length of compressed inline data */
1308 1308 comp_len = getbe32(data + 8);
1309 1309 incr = v1_hdrsize + comp_len;
1310 1310 if (incr < v1_hdrsize)
1311 1311 break;
1312 1312 if (offsets)
1313 1313 offsets[len] = data;
1314 1314 len++;
1315 1315 old_data = data;
1316 1316 data += incr;
1317 1317 if (data <= old_data)
1318 1318 break;
1319 1319 }
1320 1320
1321 1321 if (data != end && data + v1_hdrsize != end) {
1322 1322 if (!PyErr_Occurred())
1323 1323 PyErr_SetString(PyExc_ValueError, "corrupt index file");
1324 1324 return -1;
1325 1325 }
1326 1326
1327 1327 return len;
1328 1328 }
1329 1329
1330 1330 static int index_init(indexObject *self, PyObject *args)
1331 1331 {
1332 1332 PyObject *data_obj, *inlined_obj;
1333 1333 Py_ssize_t size;
1334 1334
1335 1335 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
1336 1336 return -1;
1337 1337 if (!PyString_Check(data_obj)) {
1338 1338 PyErr_SetString(PyExc_TypeError, "data is not a string");
1339 1339 return -1;
1340 1340 }
1341 1341 size = PyString_GET_SIZE(data_obj);
1342 1342
1343 1343 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
1344 1344 self->data = data_obj;
1345 1345 self->cache = NULL;
1346 1346
1347 1347 self->added = NULL;
1348 1348 self->headrevs = NULL;
1349 1349 self->offsets = NULL;
1350 1350 self->nt = NULL;
1351 1351 self->ntlength = self->ntcapacity = 0;
1352 1352 self->ntdepth = self->ntsplits = 0;
1353 1353 self->ntlookups = self->ntmisses = 0;
1354 1354 self->ntrev = -1;
1355 1355 Py_INCREF(self->data);
1356 1356
1357 1357 if (self->inlined) {
1358 1358 long len = inline_scan(self, NULL);
1359 1359 if (len == -1)
1360 1360 goto bail;
1361 1361 self->raw_length = len;
1362 1362 self->length = len + 1;
1363 1363 } else {
1364 1364 if (size % v1_hdrsize) {
1365 1365 PyErr_SetString(PyExc_ValueError, "corrupt index file");
1366 1366 goto bail;
1367 1367 }
1368 1368 self->raw_length = size / v1_hdrsize;
1369 1369 self->length = self->raw_length + 1;
1370 1370 }
1371 1371
1372 1372 return 0;
1373 1373 bail:
1374 1374 return -1;
1375 1375 }
1376 1376
1377 1377 static PyObject *index_nodemap(indexObject *self)
1378 1378 {
1379 1379 Py_INCREF(self);
1380 1380 return (PyObject *)self;
1381 1381 }
1382 1382
1383 1383 static void index_dealloc(indexObject *self)
1384 1384 {
1385 1385 _index_clearcaches(self);
1386 1386 Py_DECREF(self->data);
1387 1387 Py_XDECREF(self->added);
1388 1388 PyObject_Del(self);
1389 1389 }
1390 1390
1391 1391 static PySequenceMethods index_sequence_methods = {
1392 1392 (lenfunc)index_length, /* sq_length */
1393 1393 0, /* sq_concat */
1394 1394 0, /* sq_repeat */
1395 1395 (ssizeargfunc)index_get, /* sq_item */
1396 1396 0, /* sq_slice */
1397 1397 0, /* sq_ass_item */
1398 1398 0, /* sq_ass_slice */
1399 1399 (objobjproc)index_contains, /* sq_contains */
1400 1400 };
1401 1401
1402 1402 static PyMappingMethods index_mapping_methods = {
1403 1403 (lenfunc)index_length, /* mp_length */
1404 1404 (binaryfunc)index_getitem, /* mp_subscript */
1405 1405 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
1406 1406 };
1407 1407
1408 1408 static PyMethodDef index_methods[] = {
1409 1409 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
1410 1410 "clear the index caches"},
1411 1411 {"get", (PyCFunction)index_m_get, METH_VARARGS,
1412 1412 "get an index entry"},
1413 1413 {"headrevs", (PyCFunction)index_headrevs, METH_NOARGS,
1414 1414 "get head revisions"},
1415 1415 {"insert", (PyCFunction)index_insert, METH_VARARGS,
1416 1416 "insert an index entry"},
1417 1417 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
1418 1418 "match a potentially ambiguous node ID"},
1419 1419 {"stats", (PyCFunction)index_stats, METH_NOARGS,
1420 1420 "stats for the index"},
1421 1421 {NULL} /* Sentinel */
1422 1422 };
1423 1423
1424 1424 static PyGetSetDef index_getset[] = {
1425 1425 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
1426 1426 {NULL} /* Sentinel */
1427 1427 };
1428 1428
1429 1429 static PyTypeObject indexType = {
1430 1430 PyObject_HEAD_INIT(NULL)
1431 1431 0, /* ob_size */
1432 1432 "parsers.index", /* tp_name */
1433 1433 sizeof(indexObject), /* tp_basicsize */
1434 1434 0, /* tp_itemsize */
1435 1435 (destructor)index_dealloc, /* tp_dealloc */
1436 1436 0, /* tp_print */
1437 1437 0, /* tp_getattr */
1438 1438 0, /* tp_setattr */
1439 1439 0, /* tp_compare */
1440 1440 0, /* tp_repr */
1441 1441 0, /* tp_as_number */
1442 1442 &index_sequence_methods, /* tp_as_sequence */
1443 1443 &index_mapping_methods, /* tp_as_mapping */
1444 1444 0, /* tp_hash */
1445 1445 0, /* tp_call */
1446 1446 0, /* tp_str */
1447 1447 0, /* tp_getattro */
1448 1448 0, /* tp_setattro */
1449 1449 0, /* tp_as_buffer */
1450 1450 Py_TPFLAGS_DEFAULT, /* tp_flags */
1451 1451 "revlog index", /* tp_doc */
1452 1452 0, /* tp_traverse */
1453 1453 0, /* tp_clear */
1454 1454 0, /* tp_richcompare */
1455 1455 0, /* tp_weaklistoffset */
1456 1456 0, /* tp_iter */
1457 1457 0, /* tp_iternext */
1458 1458 index_methods, /* tp_methods */
1459 1459 0, /* tp_members */
1460 1460 index_getset, /* tp_getset */
1461 1461 0, /* tp_base */
1462 1462 0, /* tp_dict */
1463 1463 0, /* tp_descr_get */
1464 1464 0, /* tp_descr_set */
1465 1465 0, /* tp_dictoffset */
1466 1466 (initproc)index_init, /* tp_init */
1467 1467 0, /* tp_alloc */
1468 1468 };
1469 1469
1470 1470 /*
1471 1471 * returns a tuple of the form (index, cache) with elements as
1472 1472 * follows:
1473 1473 *
1474 1474 * index: an index object that lazily parses RevlogNG records
1475 1475 * cache: if data is inlined, a tuple (0, index_file_content), else None
1476 1476 *
1477 1477 * added complications are for backwards compatibility
1478 1478 */
1479 1479 static PyObject *parse_index2(PyObject *self, PyObject *args)
1480 1480 {
1481 1481 PyObject *tuple = NULL, *cache = NULL;
1482 1482 indexObject *idx;
1483 1483 int ret;
1484 1484
1485 1485 idx = PyObject_New(indexObject, &indexType);
1486 1486 if (idx == NULL)
1487 1487 goto bail;
1488 1488
1489 1489 ret = index_init(idx, args);
1490 1490 if (ret == -1)
1491 1491 goto bail;
1492 1492
1493 1493 if (idx->inlined) {
1494 1494 cache = Py_BuildValue("iO", 0, idx->data);
1495 1495 if (cache == NULL)
1496 1496 goto bail;
1497 1497 } else {
1498 1498 cache = Py_None;
1499 1499 Py_INCREF(cache);
1500 1500 }
1501 1501
1502 1502 tuple = Py_BuildValue("NN", idx, cache);
1503 1503 if (!tuple)
1504 1504 goto bail;
1505 1505 return tuple;
1506 1506
1507 1507 bail:
1508 1508 Py_XDECREF(idx);
1509 1509 Py_XDECREF(cache);
1510 1510 Py_XDECREF(tuple);
1511 1511 return NULL;
1512 1512 }
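From Python, the function above hands back the index object plus a chunk-cache seed. A small sketch of the calling convention (data and inline stand in for the revlog file contents and its inline flag):

    index, cache = parsers.parse_index2(data, inline)
    # cache is (0, data) when the revlog stores its data inline, else None
    if cache is not None:
        offset, text = cache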
1513 1513
1514 1514 static char parsers_doc[] = "Efficient content parsing.";
1515 1515
1516 1516 PyObject *encodedir(PyObject *self, PyObject *args);
1517 1517 PyObject *pathencode(PyObject *self, PyObject *args);
1518 1518 PyObject *lowerencode(PyObject *self, PyObject *args);
1519 1519
1520 1520 static PyMethodDef methods[] = {
1521 1521 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
1522 1522 {"parse_manifest", parse_manifest, METH_VARARGS, "parse a manifest\n"},
1523 1523 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
1524 1524 {"parse_index2", parse_index2, METH_VARARGS, "parse a revlog index\n"},
1525 1525 {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
1526 1526 {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
1527 1527 {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
1528 1528 {NULL, NULL}
1529 1529 };
1530 1530
1531 void dirs_module_init(PyObject *mod);
1532
1531 1533 static void module_init(PyObject *mod)
1532 1534 {
1535 dirs_module_init(mod);
1536
1533 1537 indexType.tp_new = PyType_GenericNew;
1534 1538 if (PyType_Ready(&indexType) < 0)
1535 1539 return;
1536 1540 Py_INCREF(&indexType);
1537 1541
1538 1542 PyModule_AddObject(mod, "index", (PyObject *)&indexType);
1539 1543
1540 1544 nullentry = Py_BuildValue("iiiiiiis#", 0, 0, 0,
1541 1545 -1, -1, -1, -1, nullid, 20);
1542 1546 if (nullentry)
1543 1547 PyObject_GC_UnTrack(nullentry);
1544 1548
1545 1549 dirstate_unset = Py_BuildValue("ciii", 'n', 0, -1, -1);
1546 1550 }
1547 1551
1548 1552 #ifdef IS_PY3K
1549 1553 static struct PyModuleDef parsers_module = {
1550 1554 PyModuleDef_HEAD_INIT,
1551 1555 "parsers",
1552 1556 parsers_doc,
1553 1557 -1,
1554 1558 methods
1555 1559 };
1556 1560
1557 1561 PyMODINIT_FUNC PyInit_parsers(void)
1558 1562 {
1559 1563 PyObject *mod = PyModule_Create(&parsers_module);
1560 1564 module_init(mod);
1561 1565 return mod;
1562 1566 }
1563 1567 #else
1564 1568 PyMODINIT_FUNC initparsers(void)
1565 1569 {
1566 1570 PyObject *mod = Py_InitModule3("parsers", methods, parsers_doc);
1567 1571 module_init(mod);
1568 1572 }
1569 1573 #endif
@@ -1,934 +1,937 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases
10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import match as matchmod
12 12 import os, errno, re, stat, glob
13 13
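The commit summary says the C dirs type is used "if available"; the dirs class changes themselves fall outside this hunk. A hedged sketch of the usual prefer-C-fall-back-to-Python pattern (illustrative only, not the code from this commit; _puredirs is a made-up stand-in):

    def _puredirs(source=None):
        '''toy pure-Python substitute: a plain dict of directory counts'''
        d = {}
        for f in (source or []):
            pos = f.rfind('/')
            while pos != -1:
                d[f[:pos]] = d.get(f[:pos], 0) + 1
                pos = f.rfind('/', 0, pos)
        return d

    dirs = getattr(parsers, 'dirs', _puredirs)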
14 14 if os.name == 'nt':
15 15 import scmwindows as scmplatform
16 16 else:
17 17 import scmposix as scmplatform
18 18
19 19 systemrcpath = scmplatform.systemrcpath
20 20 userrcpath = scmplatform.userrcpath
21 21
22 22 def nochangesfound(ui, repo, excluded=None):
23 23 '''Report no changes for push/pull, excluded is None or a list of
24 24 nodes excluded from the push/pull.
25 25 '''
26 26 secretlist = []
27 27 if excluded:
28 28 for n in excluded:
29 29 if n not in repo:
30 30 # discovery should not have included the filtered revision,
31 31 # we have to explicitly exclude it until discovery is cleaned up.
32 32 continue
33 33 ctx = repo[n]
34 34 if ctx.phase() >= phases.secret and not ctx.extinct():
35 35 secretlist.append(n)
36 36
37 37 if secretlist:
38 38 ui.status(_("no changes found (ignored %d secret changesets)\n")
39 39 % len(secretlist))
40 40 else:
41 41 ui.status(_("no changes found\n"))
42 42
43 43 def checknewlabel(repo, lbl, kind):
44 44 if lbl in ['tip', '.', 'null']:
45 45 raise util.Abort(_("the name '%s' is reserved") % lbl)
46 46 for c in (':', '\0', '\n', '\r'):
47 47 if c in lbl:
48 48 raise util.Abort(_("%r cannot be used in a name") % c)
49 49 try:
50 50 int(lbl)
51 51 raise util.Abort(_("a %s cannot have an integer as its name") % kind)
52 52 except ValueError:
53 53 pass
54 54
55 55 def checkfilename(f):
56 56 '''Check that the filename f is an acceptable filename for a tracked file'''
57 57 if '\r' in f or '\n' in f:
58 58 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
59 59
60 60 def checkportable(ui, f):
61 61 '''Check if filename f is portable and warn or abort depending on config'''
62 62 checkfilename(f)
63 63 abort, warn = checkportabilityalert(ui)
64 64 if abort or warn:
65 65 msg = util.checkwinfilename(f)
66 66 if msg:
67 67 msg = "%s: %r" % (msg, f)
68 68 if abort:
69 69 raise util.Abort(msg)
70 70 ui.warn(_("warning: %s\n") % msg)
71 71
72 72 def checkportabilityalert(ui):
73 73 '''check if the user's config requests nothing, a warning, or abort for
74 74 non-portable filenames'''
75 75 val = ui.config('ui', 'portablefilenames', 'warn')
76 76 lval = val.lower()
77 77 bval = util.parsebool(val)
78 78 abort = os.name == 'nt' or lval == 'abort'
79 79 warn = bval or lval == 'warn'
80 80 if bval is None and not (warn or abort or lval == 'ignore'):
81 81 raise error.ConfigError(
82 82 _("ui.portablefilenames value is invalid ('%s')") % val)
83 83 return abort, warn
84 84
85 85 class casecollisionauditor(object):
86 86 def __init__(self, ui, abort, dirstate):
87 87 self._ui = ui
88 88 self._abort = abort
89 89 allfiles = '\0'.join(dirstate._map)
90 90 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
91 91 self._dirstate = dirstate
92 92 # The purpose of _newfiles is so that we don't complain about
93 93 # case collisions if someone were to call this object with the
94 94 # same filename twice.
95 95 self._newfiles = set()
96 96
97 97 def __call__(self, f):
98 98 fl = encoding.lower(f)
99 99 if (fl in self._loweredfiles and f not in self._dirstate and
100 100 f not in self._newfiles):
101 101 msg = _('possible case-folding collision for %s') % f
102 102 if self._abort:
103 103 raise util.Abort(msg)
104 104 self._ui.warn(_("warning: %s\n") % msg)
105 105 self._loweredfiles.add(fl)
106 106 self._newfiles.add(f)
107 107
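A minimal usage sketch (not part of this change) for the auditor above, assuming the current directory is a Mercurial working copy; it only illustrates how the object is constructed and called:

from mercurial import hg, ui, scmutil

repo = hg.repository(ui.ui(), '.')      # assumed working copy
audit = scmutil.casecollisionauditor(repo.ui, False, repo.dirstate)
audit('README')   # warns (abort=False) if a tracked file differs only in case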
108 108 class pathauditor(object):
109 109 '''ensure that a filesystem path contains no banned components.
110 110 the following properties of a path are checked:
111 111
112 112 - ends with a directory separator
113 113 - under top-level .hg
114 114 - starts at the root of a windows drive
115 115 - contains ".."
116 116 - traverses a symlink (e.g. a/symlink_here/b)
117 117 - inside a nested repository (a callback can be used to approve
118 118 some nested repositories, e.g., subrepositories)
119 119 '''
120 120
121 121 def __init__(self, root, callback=None):
122 122 self.audited = set()
123 123 self.auditeddir = set()
124 124 self.root = root
125 125 self.callback = callback
126 126 if os.path.lexists(root) and not util.checkcase(root):
127 127 self.normcase = util.normcase
128 128 else:
129 129 self.normcase = lambda x: x
130 130
131 131 def __call__(self, path):
132 132 '''Check the relative path.
133 133 path may contain a pattern (e.g. foodir/**.txt)'''
134 134
135 135 path = util.localpath(path)
136 136 normpath = self.normcase(path)
137 137 if normpath in self.audited:
138 138 return
139 139 # AIX ignores "/" at end of path, others raise EISDIR.
140 140 if util.endswithsep(path):
141 141 raise util.Abort(_("path ends in directory separator: %s") % path)
142 142 parts = util.splitpath(path)
143 143 if (os.path.splitdrive(path)[0]
144 144 or parts[0].lower() in ('.hg', '.hg.', '')
145 145 or os.pardir in parts):
146 146 raise util.Abort(_("path contains illegal component: %s") % path)
147 147 if '.hg' in path.lower():
148 148 lparts = [p.lower() for p in parts]
149 149 for p in '.hg', '.hg.':
150 150 if p in lparts[1:]:
151 151 pos = lparts.index(p)
152 152 base = os.path.join(*parts[:pos])
153 153 raise util.Abort(_("path '%s' is inside nested repo %r")
154 154 % (path, base))
155 155
156 156 normparts = util.splitpath(normpath)
157 157 assert len(parts) == len(normparts)
158 158
159 159 parts.pop()
160 160 normparts.pop()
161 161 prefixes = []
162 162 while parts:
163 163 prefix = os.sep.join(parts)
164 164 normprefix = os.sep.join(normparts)
165 165 if normprefix in self.auditeddir:
166 166 break
167 167 curpath = os.path.join(self.root, prefix)
168 168 try:
169 169 st = os.lstat(curpath)
170 170 except OSError, err:
171 171 # EINVAL can be raised as invalid path syntax under win32.
172 172 # These errors must be ignored so that patterns can still be checked.
173 173 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
174 174 raise
175 175 else:
176 176 if stat.S_ISLNK(st.st_mode):
177 177 raise util.Abort(
178 178 _('path %r traverses symbolic link %r')
179 179 % (path, prefix))
180 180 elif (stat.S_ISDIR(st.st_mode) and
181 181 os.path.isdir(os.path.join(curpath, '.hg'))):
182 182 if not self.callback or not self.callback(curpath):
183 183 raise util.Abort(_("path '%s' is inside nested "
184 184 "repo %r")
185 185 % (path, prefix))
186 186 prefixes.append(normprefix)
187 187 parts.pop()
188 188 normparts.pop()
189 189
190 190 self.audited.add(normpath)
191 191 # only add prefixes to the cache after checking everything: we don't
192 192 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
193 193 self.auditeddir.update(prefixes)
194 194
195 195 def check(self, path):
196 196 try:
197 197 self(path)
198 198 return True
199 199 except (OSError, util.Abort):
200 200 return False
201 201
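A minimal sketch (not part of this change) of exercising the auditor above; the root and file names are assumptions:

from mercurial import scmutil

audit = scmutil.pathauditor('/tmp/repo')    # assumed repository root
print audit.check('src/module.py')          # True, unless src/ is a symlink
                                            # or a nested repository
print audit.check('.hg/hgrc')               # False: under top-level .hg
print audit.check('../escape.txt')          # False: contains ".."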
202 202 class abstractvfs(object):
203 203 """Abstract base class; cannot be instantiated"""
204 204
205 205 def __init__(self, *args, **kwargs):
206 206 '''Prevent instantiation; don't call this from subclasses.'''
207 207 raise NotImplementedError('attempted instantiating ' + str(type(self)))
208 208
209 209 def tryread(self, path):
210 210 '''gracefully return an empty string for missing files'''
211 211 try:
212 212 return self.read(path)
213 213 except IOError, inst:
214 214 if inst.errno != errno.ENOENT:
215 215 raise
216 216 return ""
217 217
218 218 def read(self, path):
219 219 fp = self(path, 'rb')
220 220 try:
221 221 return fp.read()
222 222 finally:
223 223 fp.close()
224 224
225 225 def write(self, path, data):
226 226 fp = self(path, 'wb')
227 227 try:
228 228 return fp.write(data)
229 229 finally:
230 230 fp.close()
231 231
232 232 def append(self, path, data):
233 233 fp = self(path, 'ab')
234 234 try:
235 235 return fp.write(data)
236 236 finally:
237 237 fp.close()
238 238
239 239 def exists(self, path=None):
240 240 return os.path.exists(self.join(path))
241 241
242 242 def isdir(self, path=None):
243 243 return os.path.isdir(self.join(path))
244 244
245 245 def makedir(self, path=None, notindexed=True):
246 246 return util.makedir(self.join(path), notindexed)
247 247
248 248 def makedirs(self, path=None, mode=None):
249 249 return util.makedirs(self.join(path), mode)
250 250
251 251 def mkdir(self, path=None):
252 252 return os.mkdir(self.join(path))
253 253
254 254 def readdir(self, path=None, stat=None, skip=None):
255 255 return osutil.listdir(self.join(path), stat, skip)
256 256
257 257 def stat(self, path=None):
258 258 return os.stat(self.join(path))
259 259
260 260 class vfs(abstractvfs):
261 261 '''Operate files relative to a base directory
262 262
263 263 This class is used to hide the details of COW semantics and
264 264 remote file access from higher level code.
265 265 '''
266 266 def __init__(self, base, audit=True, expand=False):
267 267 if expand:
268 268 base = os.path.realpath(util.expandpath(base))
269 269 self.base = base
270 270 self._setmustaudit(audit)
271 271 self.createmode = None
272 272 self._trustnlink = None
273 273
274 274 def _getmustaudit(self):
275 275 return self._audit
276 276
277 277 def _setmustaudit(self, onoff):
278 278 self._audit = onoff
279 279 if onoff:
280 280 self.audit = pathauditor(self.base)
281 281 else:
282 282 self.audit = util.always
283 283
284 284 mustaudit = property(_getmustaudit, _setmustaudit)
285 285
286 286 @util.propertycache
287 287 def _cansymlink(self):
288 288 return util.checklink(self.base)
289 289
290 290 @util.propertycache
291 291 def _chmod(self):
292 292 return util.checkexec(self.base)
293 293
294 294 def _fixfilemode(self, name):
295 295 if self.createmode is None or not self._chmod:
296 296 return
297 297 os.chmod(name, self.createmode & 0666)
298 298
299 299 def __call__(self, path, mode="r", text=False, atomictemp=False):
300 300 if self._audit:
301 301 r = util.checkosfilename(path)
302 302 if r:
303 303 raise util.Abort("%s: %r" % (r, path))
304 304 self.audit(path)
305 305 f = self.join(path)
306 306
307 307 if not text and "b" not in mode:
308 308 mode += "b" # for that other OS
309 309
310 310 nlink = -1
311 311 if mode not in ('r', 'rb'):
312 312 dirname, basename = util.split(f)
313 313 # If basename is empty, then the path is malformed because it points
314 314 # to a directory. Let the posixfile() call below raise IOError.
315 315 if basename:
316 316 if atomictemp:
317 317 util.ensuredirs(dirname, self.createmode)
318 318 return util.atomictempfile(f, mode, self.createmode)
319 319 try:
320 320 if 'w' in mode:
321 321 util.unlink(f)
322 322 nlink = 0
323 323 else:
324 324 # nlinks() may behave differently for files on Windows
325 325 # shares if the file is open.
326 326 fd = util.posixfile(f)
327 327 nlink = util.nlinks(f)
328 328 if nlink < 1:
329 329 nlink = 2 # force mktempcopy (issue1922)
330 330 fd.close()
331 331 except (OSError, IOError), e:
332 332 if e.errno != errno.ENOENT:
333 333 raise
334 334 nlink = 0
335 335 util.ensuredirs(dirname, self.createmode)
336 336 if nlink > 0:
337 337 if self._trustnlink is None:
338 338 self._trustnlink = nlink > 1 or util.checknlink(f)
339 339 if nlink > 1 or not self._trustnlink:
340 340 util.rename(util.mktempcopy(f), f)
341 341 fp = util.posixfile(f, mode)
342 342 if nlink == 0:
343 343 self._fixfilemode(f)
344 344 return fp
345 345
346 346 def symlink(self, src, dst):
347 347 self.audit(dst)
348 348 linkname = self.join(dst)
349 349 try:
350 350 os.unlink(linkname)
351 351 except OSError:
352 352 pass
353 353
354 354 util.ensuredirs(os.path.dirname(linkname), self.createmode)
355 355
356 356 if self._cansymlink:
357 357 try:
358 358 os.symlink(src, linkname)
359 359 except OSError, err:
360 360 raise OSError(err.errno, _('could not symlink to %r: %s') %
361 361 (src, err.strerror), linkname)
362 362 else:
363 363 self.write(dst, src)
364 364
365 365 def join(self, path):
366 366 if path:
367 367 return os.path.join(self.base, path)
368 368 else:
369 369 return self.base
370 370
371 371 opener = vfs
372 372
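A minimal sketch (not part of this change) of the vfs helpers defined above, using a temporary directory as the base (an assumption for the demo):

import tempfile
from mercurial import scmutil

v = scmutil.vfs(tempfile.mkdtemp())
v.write('notes/todo.txt', 'hello\n')       # parent directories are created
print v.read('notes/todo.txt')             # -> hello
print repr(v.tryread('missing.txt'))       # -> '' instead of an IOError
print v.isdir('notes')                     # -> True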
373 373 class auditvfs(object):
374 374 def __init__(self, vfs):
375 375 self.vfs = vfs
376 376
377 377 def _getmustaudit(self):
378 378 return self.vfs.mustaudit
379 379
380 380 def _setmustaudit(self, onoff):
381 381 self.vfs.mustaudit = onoff
382 382
383 383 mustaudit = property(_getmustaudit, _setmustaudit)
384 384
385 385 class filtervfs(abstractvfs, auditvfs):
386 386 '''Wrapper vfs for filtering filenames with a function.'''
387 387
388 388 def __init__(self, vfs, filter):
389 389 auditvfs.__init__(self, vfs)
390 390 self._filter = filter
391 391
392 392 def __call__(self, path, *args, **kwargs):
393 393 return self.vfs(self._filter(path), *args, **kwargs)
394 394
395 395 def join(self, path):
396 396 if path:
397 397 return self.vfs.join(self._filter(path))
398 398 else:
399 399 return self.vfs.join(path)
400 400
401 401 filteropener = filtervfs
402 402
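A minimal sketch (not part of this change): a filtervfs that lowercases every path before it reaches the wrapped vfs; the lowercasing filter and base directory are purely illustrative:

from mercurial import scmutil

store = scmutil.vfs('/tmp/store')              # assumed base directory
lower = scmutil.filtervfs(store, str.lower)
lower.write('README.TXT', 'data\n')            # actually writes readme.txt
print lower.join('README.TXT')                 # -> /tmp/store/readme.txt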
403 403 class readonlyvfs(abstractvfs, auditvfs):
404 404 '''Wrapper vfs preventing any writing.'''
405 405
406 406 def __init__(self, vfs):
407 407 auditvfs.__init__(self, vfs)
408 408
409 409 def __call__(self, path, mode='r', *args, **kw):
410 410 if mode not in ('r', 'rb'):
411 411 raise util.Abort('this vfs is read only')
412 412 return self.vfs(path, mode, *args, **kw)
413 413
414 414
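A minimal sketch (not part of this change) showing readonlyvfs rejecting writes while still allowing reads; the paths are assumptions:

from mercurial import scmutil, util

ro = scmutil.readonlyvfs(scmutil.vfs('/tmp/store'))
print repr(ro.tryread('somefile'))    # reads are fine (empty if missing)
try:
    ro('somefile', 'wb')
except util.Abort, inst:
    print 'write blocked:', inst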
415 415 def canonpath(root, cwd, myname, auditor=None):
416 416 '''return the canonical path of myname, given cwd and root'''
417 417 if util.endswithsep(root):
418 418 rootsep = root
419 419 else:
420 420 rootsep = root + os.sep
421 421 name = myname
422 422 if not os.path.isabs(name):
423 423 name = os.path.join(root, cwd, name)
424 424 name = os.path.normpath(name)
425 425 if auditor is None:
426 426 auditor = pathauditor(root)
427 427 if name != rootsep and name.startswith(rootsep):
428 428 name = name[len(rootsep):]
429 429 auditor(name)
430 430 return util.pconvert(name)
431 431 elif name == root:
432 432 return ''
433 433 else:
434 434 # Determine whether `name' is in the hierarchy at or beneath `root',
435 435 # by iterating name=dirname(name) until that causes no change (can't
436 436 # check name == '/', because that doesn't work on windows). The list
437 437 # `rel' holds the reversed list of components making up the relative
438 438 # file name we want.
439 439 rel = []
440 440 while True:
441 441 try:
442 442 s = util.samefile(name, root)
443 443 except OSError:
444 444 s = False
445 445 if s:
446 446 if not rel:
447 447 # name was actually the same as root (maybe a symlink)
448 448 return ''
449 449 rel.reverse()
450 450 name = os.path.join(*rel)
451 451 auditor(name)
452 452 return util.pconvert(name)
453 453 dirname, basename = util.split(name)
454 454 rel.append(basename)
455 455 if dirname == name:
456 456 break
457 457 name = dirname
458 458
459 459 raise util.Abort(_("%s not under root '%s'") % (myname, root))
460 460
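A minimal sketch (not part of this change) of canonpath resolving names relative to a repository root; the paths are assumptions:

from mercurial import scmutil

root, cwd = '/home/user/repo', 'src'
print scmutil.canonpath(root, cwd, 'main.py')              # -> src/main.py
print scmutil.canonpath(root, cwd, '/home/user/repo/doc')  # -> doc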
461 461 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
462 462 '''yield every hg repository under path, always recursively.
463 463 The recurse flag will only control recursion into repo working dirs'''
464 464 def errhandler(err):
465 465 if err.filename == path:
466 466 raise err
467 467 samestat = getattr(os.path, 'samestat', None)
468 468 if followsym and samestat is not None:
469 469 def adddir(dirlst, dirname):
470 470 match = False
471 471 dirstat = os.stat(dirname)
472 472 for lstdirstat in dirlst:
473 473 if samestat(dirstat, lstdirstat):
474 474 match = True
475 475 break
476 476 if not match:
477 477 dirlst.append(dirstat)
478 478 return not match
479 479 else:
480 480 followsym = False
481 481
482 482 if (seen_dirs is None) and followsym:
483 483 seen_dirs = []
484 484 adddir(seen_dirs, path)
485 485 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
486 486 dirs.sort()
487 487 if '.hg' in dirs:
488 488 yield root # found a repository
489 489 qroot = os.path.join(root, '.hg', 'patches')
490 490 if os.path.isdir(os.path.join(qroot, '.hg')):
491 491 yield qroot # we have a patch queue repo here
492 492 if recurse:
493 493 # avoid recursing inside the .hg directory
494 494 dirs.remove('.hg')
495 495 else:
496 496 dirs[:] = [] # don't descend further
497 497 elif followsym:
498 498 newdirs = []
499 499 for d in dirs:
500 500 fname = os.path.join(root, d)
501 501 if adddir(seen_dirs, fname):
502 502 if os.path.islink(fname):
503 503 for hgname in walkrepos(fname, True, seen_dirs):
504 504 yield hgname
505 505 else:
506 506 newdirs.append(d)
507 507 dirs[:] = newdirs
508 508
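A minimal sketch (not part of this change) that prints every repository found below a directory; the starting path is an assumption:

from mercurial import scmutil

for repopath in scmutil.walkrepos('/home/user/src', followsym=True):
    print repopath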
509 509 def osrcpath():
510 510 '''return default os-specific hgrc search path'''
511 511 path = systemrcpath()
512 512 path.extend(userrcpath())
513 513 path = [os.path.normpath(f) for f in path]
514 514 return path
515 515
516 516 _rcpath = None
517 517
518 518 def rcpath():
519 519 '''return hgrc search path. if env var HGRCPATH is set, use it.
520 520 for each item in path, if directory, use files ending in .rc,
521 521 else use item.
522 522 make HGRCPATH empty to only look in .hg/hgrc of current repo.
523 523 if no HGRCPATH, use default os-specific path.'''
524 524 global _rcpath
525 525 if _rcpath is None:
526 526 if 'HGRCPATH' in os.environ:
527 527 _rcpath = []
528 528 for p in os.environ['HGRCPATH'].split(os.pathsep):
529 529 if not p:
530 530 continue
531 531 p = util.expandpath(p)
532 532 if os.path.isdir(p):
533 533 for f, kind in osutil.listdir(p):
534 534 if f.endswith('.rc'):
535 535 _rcpath.append(os.path.join(p, f))
536 536 else:
537 537 _rcpath.append(p)
538 538 else:
539 539 _rcpath = osrcpath()
540 540 return _rcpath
541 541
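A minimal sketch (not part of this change) of how HGRCPATH feeds into rcpath(); the entries are assumptions, and the cached module-level value is reset only for the demo:

import os
from mercurial import scmutil

os.environ['HGRCPATH'] = os.pathsep.join(['/etc/hgext.rc', '/etc/hg.d'])
scmutil._rcpath = None            # discard any previously cached value
for p in scmutil.rcpath():
    print p                       # the file itself, or *.rc files for a dir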
542 542 def revsingle(repo, revspec, default='.'):
543 543 if not revspec:
544 544 return repo[default]
545 545
546 546 l = revrange(repo, [revspec])
547 547 if len(l) < 1:
548 548 raise util.Abort(_('empty revision set'))
549 549 return repo[l[-1]]
550 550
551 551 def revpair(repo, revs):
552 552 if not revs:
553 553 return repo.dirstate.p1(), None
554 554
555 555 l = revrange(repo, revs)
556 556
557 557 if len(l) == 0:
558 558 if revs:
559 559 raise util.Abort(_('empty revision range'))
560 560 return repo.dirstate.p1(), None
561 561
562 562 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
563 563 return repo.lookup(l[0]), None
564 564
565 565 return repo.lookup(l[0]), repo.lookup(l[-1])
566 566
567 567 _revrangesep = ':'
568 568
569 569 def revrange(repo, revs):
570 570 """Return a list of revisions from a list of revision specifications."""
571 571
572 572 def revfix(repo, val, defval):
573 573 if not val and val != 0 and defval is not None:
574 574 return defval
575 575 return repo[val].rev()
576 576
577 577 seen, l = set(), []
578 578 for spec in revs:
579 579 if l and not seen:
580 580 seen = set(l)
581 581 # attempt to parse old-style ranges first to deal with
582 582 # things like old-tag which contain query metacharacters
583 583 try:
584 584 if isinstance(spec, int):
585 585 seen.add(spec)
586 586 l.append(spec)
587 587 continue
588 588
589 589 if _revrangesep in spec:
590 590 start, end = spec.split(_revrangesep, 1)
591 591 start = revfix(repo, start, 0)
592 592 end = revfix(repo, end, len(repo) - 1)
593 593 if end == nullrev and start <= 0:
594 594 start = nullrev
595 595 rangeiter = repo.changelog.revs(start, end)
596 596 if not seen and not l:
597 597 # by far the most common case: revs = ["-1:0"]
598 598 l = list(rangeiter)
599 599 # defer syncing seen until next iteration
600 600 continue
601 601 newrevs = set(rangeiter)
602 602 if seen:
603 603 newrevs.difference_update(seen)
604 604 seen.update(newrevs)
605 605 else:
606 606 seen = newrevs
607 607 l.extend(sorted(newrevs, reverse=start > end))
608 608 continue
609 609 elif spec and spec in repo: # single unquoted rev
610 610 rev = revfix(repo, spec, None)
611 611 if rev in seen:
612 612 continue
613 613 seen.add(rev)
614 614 l.append(rev)
615 615 continue
616 616 except error.RepoLookupError:
617 617 pass
618 618
619 619 # fall through to new-style queries if old-style fails
620 620 m = revset.match(repo.ui, spec)
621 621 dl = [r for r in m(repo, list(repo)) if r not in seen]
622 622 l.extend(dl)
623 623 seen.update(dl)
624 624
625 625 return l
626 626
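A minimal sketch (not part of this change) resolving revision specifications against a repository opened from the current directory (an assumption):

from mercurial import hg, ui, scmutil

repo = hg.repository(ui.ui(), '.')
print scmutil.revsingle(repo, 'tip').rev()    # newest revision number
print scmutil.revrange(repo, ['0:tip'])       # e.g. [0, 1, 2, 3]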
627 627 def expandpats(pats):
628 628 if not util.expandglobs:
629 629 return list(pats)
630 630 ret = []
631 631 for p in pats:
632 632 kind, name = matchmod._patsplit(p, None)
633 633 if kind is None:
634 634 try:
635 635 globbed = glob.glob(name)
636 636 except re.error:
637 637 globbed = [name]
638 638 if globbed:
639 639 ret.extend(globbed)
640 640 continue
641 641 ret.append(p)
642 642 return ret
643 643
644 644 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
645 645 if pats == ("",):
646 646 pats = []
647 647 if not globbed and default == 'relpath':
648 648 pats = expandpats(pats or [])
649 649
650 650 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
651 651 default)
652 652 def badfn(f, msg):
653 653 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
654 654 m.bad = badfn
655 655 return m, pats
656 656
657 657 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
658 658 return matchandpats(ctx, pats, opts, globbed, default)[0]
659 659
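A minimal sketch (not part of this change) building a matcher for the working context with the helpers above; the pattern and file names are assumptions:

from mercurial import hg, ui, scmutil

repo = hg.repository(ui.ui(), '.')
m = scmutil.match(repo[None], ['glob:*.py'])
print m('setup.py'), m('README')              # -> True False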
660 660 def matchall(repo):
661 661 return matchmod.always(repo.root, repo.getcwd())
662 662
663 663 def matchfiles(repo, files):
664 664 return matchmod.exact(repo.root, repo.getcwd(), files)
665 665
666 666 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
667 667 if dry_run is None:
668 668 dry_run = opts.get('dry_run')
669 669 if similarity is None:
670 670 similarity = float(opts.get('similarity') or 0)
671 671 # we'd use status here, except handling of symlinks and ignore is tricky
672 672 added, unknown, deleted, removed = [], [], [], []
673 673 audit_path = pathauditor(repo.root)
674 674 m = match(repo[None], pats, opts)
675 675 rejected = []
676 676 m.bad = lambda x, y: rejected.append(x)
677 677
678 678 ctx = repo[None]
679 679 dirstate = repo.dirstate
680 680 walkresults = dirstate.walk(m, sorted(ctx.substate), True, False)
681 681 for abs, st in walkresults.iteritems():
682 682 dstate = dirstate[abs]
683 683 if dstate == '?' and audit_path.check(abs):
684 684 unknown.append(abs)
685 685 elif dstate != 'r' and not st:
686 686 deleted.append(abs)
687 687 # for finding renames
688 688 elif dstate == 'r':
689 689 removed.append(abs)
690 690 elif dstate == 'a':
691 691 added.append(abs)
692 692
693 693 unknownset = set(unknown)
694 694 toprint = unknownset.copy()
695 695 toprint.update(deleted)
696 696 for abs in sorted(toprint):
697 697 if repo.ui.verbose or not m.exact(abs):
698 698 rel = m.rel(abs)
699 699 if abs in unknownset:
700 700 status = _('adding %s\n') % ((pats and rel) or abs)
701 701 else:
702 702 status = _('removing %s\n') % ((pats and rel) or abs)
703 703 repo.ui.status(status)
704 704
705 705 copies = {}
706 706 if similarity > 0:
707 707 for old, new, score in similar.findrenames(repo,
708 708 added + unknown, removed + deleted, similarity):
709 709 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
710 710 repo.ui.status(_('recording removal of %s as rename to %s '
711 711 '(%d%% similar)\n') %
712 712 (m.rel(old), m.rel(new), score * 100))
713 713 copies[new] = old
714 714
715 715 if not dry_run:
716 716 wctx = repo[None]
717 717 wlock = repo.wlock()
718 718 try:
719 719 wctx.forget(deleted)
720 720 wctx.add(unknown)
721 721 for new, old in copies.iteritems():
722 722 wctx.copy(old, new)
723 723 finally:
724 724 wlock.release()
725 725
726 726 for f in rejected:
727 727 if f in m.files():
728 728 return 1
729 729 return 0
730 730
731 731 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
732 732 """Update the dirstate to reflect the intent of copying src to dst. For
733 733 various reasons it might not end up with dst being marked as copied from src.
734 734 """
735 735 origsrc = repo.dirstate.copied(src) or src
736 736 if dst == origsrc: # copying back a copy?
737 737 if repo.dirstate[dst] not in 'mn' and not dryrun:
738 738 repo.dirstate.normallookup(dst)
739 739 else:
740 740 if repo.dirstate[origsrc] == 'a' and origsrc == src:
741 741 if not ui.quiet:
742 742 ui.warn(_("%s has not been committed yet, so no copy "
743 743 "data will be stored for %s.\n")
744 744 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
745 745 if repo.dirstate[dst] in '?r' and not dryrun:
746 746 wctx.add([dst])
747 747 elif not dryrun:
748 748 wctx.copy(origsrc, dst)
749 749
750 750 def readrequires(opener, supported):
751 751 '''Reads and parses .hg/requires and checks if all entries found
752 752 are in the list of supported features.'''
753 753 requirements = set(opener.read("requires").splitlines())
754 754 missings = []
755 755 for r in requirements:
756 756 if r not in supported:
757 757 if not r or not r[0].isalnum():
758 758 raise error.RequirementError(_(".hg/requires file is corrupt"))
759 759 missings.append(r)
760 760 missings.sort()
761 761 if missings:
762 762 raise error.RequirementError(
763 763 _("unknown repository format: requires features '%s' (upgrade "
764 764 "Mercurial)") % "', '".join(missings))
765 765 return requirements
766 766
767 767 class filecacheentry(object):
768 768 def __init__(self, path, stat=True):
769 769 self.path = path
770 770 self.cachestat = None
771 771 self._cacheable = None
772 772
773 773 if stat:
774 774 self.cachestat = filecacheentry.stat(self.path)
775 775
776 776 if self.cachestat:
777 777 self._cacheable = self.cachestat.cacheable()
778 778 else:
779 779 # None means we don't know yet
780 780 self._cacheable = None
781 781
782 782 def refresh(self):
783 783 if self.cacheable():
784 784 self.cachestat = filecacheentry.stat(self.path)
785 785
786 786 def cacheable(self):
787 787 if self._cacheable is not None:
788 788 return self._cacheable
789 789
790 790 # we don't know yet, assume it is for now
791 791 return True
792 792
793 793 def changed(self):
794 794 # no point in going further if we can't cache it
795 795 if not self.cacheable():
796 796 return True
797 797
798 798 newstat = filecacheentry.stat(self.path)
799 799
800 800 # we may not know if it's cacheable yet, check again now
801 801 if newstat and self._cacheable is None:
802 802 self._cacheable = newstat.cacheable()
803 803
804 804 # check again
805 805 if not self._cacheable:
806 806 return True
807 807
808 808 if self.cachestat != newstat:
809 809 self.cachestat = newstat
810 810 return True
811 811 else:
812 812 return False
813 813
814 814 @staticmethod
815 815 def stat(path):
816 816 try:
817 817 return util.cachestat(path)
818 818 except OSError, e:
819 819 if e.errno != errno.ENOENT:
820 820 raise
821 821
822 822 class filecache(object):
823 823 '''A property like decorator that tracks a file under .hg/ for updates.
824 824
825 825 Records stat info when called in _filecache.
826 826
827 827 On subsequent calls, compares old stat info with new info, and recreates
828 828 the object when needed, updating the new stat info in _filecache.
829 829
830 830 Mercurial either atomically renames or appends to files under .hg,
831 831 so to ensure the cache is reliable we need the filesystem to be able
832 832 to tell us if a file has been replaced. If it can't, we fall back to
833 833 recreating the object on every call (essentially the same behaviour as
834 834 propertycache).'''
835 835 def __init__(self, path):
836 836 self.path = path
837 837
838 838 def join(self, obj, fname):
839 839 """Used to compute the runtime path of the cached file.
840 840
841 841 Users should subclass filecache and provide their own version of this
842 842 function to call the appropriate join function on 'obj' (an instance
843 843 of the class whose member function was decorated).
844 844 """
845 845 return obj.join(fname)
846 846
847 847 def __call__(self, func):
848 848 self.func = func
849 849 self.name = func.__name__
850 850 return self
851 851
852 852 def __get__(self, obj, type=None):
853 853 # do we need to check if the file changed?
854 854 if self.name in obj.__dict__:
855 855 assert self.name in obj._filecache, self.name
856 856 return obj.__dict__[self.name]
857 857
858 858 entry = obj._filecache.get(self.name)
859 859
860 860 if entry:
861 861 if entry.changed():
862 862 entry.obj = self.func(obj)
863 863 else:
864 864 path = self.join(obj, self.path)
865 865
866 866 # We stat -before- creating the object so our cache doesn't lie if
867 867 # a writer modified the file between the time we read and stat it
868 868 entry = filecacheentry(path)
869 869 entry.obj = self.func(obj)
870 870
871 871 obj._filecache[self.name] = entry
872 872
873 873 obj.__dict__[self.name] = entry.obj
874 874 return entry.obj
875 875
876 876 def __set__(self, obj, value):
877 877 if self.name not in obj._filecache:
878 878 # we add an entry for the missing value because X in __dict__
879 879 # implies X in _filecache
880 880 ce = filecacheentry(self.join(obj, self.path), False)
881 881 obj._filecache[self.name] = ce
882 882 else:
883 883 ce = obj._filecache[self.name]
884 884
885 885 ce.obj = value # update cached copy
886 886 obj.__dict__[self.name] = value # update copy returned by obj.x
887 887
888 888 def __delete__(self, obj):
889 889 try:
890 890 del obj.__dict__[self.name]
891 891 except KeyError:
892 892 raise AttributeError(self.name)
893 893
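A minimal sketch (not part of this change) of the filecache decorator on a toy object; the fakerepo class and the '/tmp/fakerepo/config' file are assumptions, not Mercurial code:

import os
from mercurial import scmutil

class fakerepo(object):
    def __init__(self, path):
        self.path = path
        self._filecache = {}                 # required by the descriptor
    def join(self, fname):                   # used by filecache.join()
        return os.path.join(self.path, fname)
    @scmutil.filecache('config')
    def config(self):
        return open(self.join('config')).read()

repo = fakerepo('/tmp/fakerepo')             # assumed to contain 'config'
print repo.config                            # read from disk and cached
print repo.config                            # cached until the file changes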
894 894 class dirs(object):
895 895 '''a multiset of directory names from a dirstate or manifest'''
896 896
897 897 def __init__(self, map, skip=None):
898 898 self._dirs = {}
899 899 addpath = self.addpath
900 900 if util.safehasattr(map, 'iteritems') and skip is not None:
901 901 for f, s in map.iteritems():
902 902 if s[0] != skip:
903 903 addpath(f)
904 904 else:
905 905 for f in map:
906 906 addpath(f)
907 907
908 908 def addpath(self, path):
909 909 dirs = self._dirs
910 910 for base in finddirs(path):
911 911 if base in dirs:
912 912 dirs[base] += 1
913 913 return
914 914 dirs[base] = 1
915 915
916 916 def delpath(self, path):
917 917 dirs = self._dirs
918 918 for base in finddirs(path):
919 919 if dirs[base] > 1:
920 920 dirs[base] -= 1
921 921 return
922 922 del dirs[base]
923 923
924 924 def __iter__(self):
925 925 return self._dirs.iterkeys()
926 926
927 927 def __contains__(self, d):
928 928 return d in self._dirs
929 929
930 if util.safehasattr(parsers, 'dirs'):
931 dirs = parsers.dirs
932
930 933 def finddirs(path):
931 934 pos = path.rfind('/')
932 935 while pos != -1:
933 936 yield path[:pos]
934 937 pos = path.rfind('/', 0, pos)
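A minimal sketch (not part of this change) of finddirs and the dirs multiset above (pure Python, or the new C version when parsers provides it); the file names are assumptions:

from mercurial import scmutil

print list(scmutil.finddirs('a/b/c.txt'))    # -> ['a/b', 'a']

d = scmutil.dirs(['a/b/c.txt', 'a/b/d.txt', 'e/f.txt'])
print 'a/b' in d, 'a' in d, 'x' in d         # -> True True False
d.delpath('a/b/c.txt')
print 'a/b' in d                             # still True: a/b/d.txt remains
d.delpath('a/b/d.txt')
print 'a/b' in d                             # -> False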
@@ -1,566 +1,567
1 1 #
2 2 # This is the mercurial setup script.
3 3 #
4 4 # 'python setup.py install', or
5 5 # 'python setup.py --help' for more options
6 6
7 7 import sys, platform
8 8 if getattr(sys, 'version_info', (0, 0, 0)) < (2, 4, 0, 'final'):
9 9 raise SystemExit("Mercurial requires Python 2.4 or later.")
10 10
11 11 if sys.version_info[0] >= 3:
12 12 def b(s):
13 13 '''A helper function to emulate 2.6+ bytes literals using string
14 14 literals.'''
15 15 return s.encode('latin1')
16 16 else:
17 17 def b(s):
18 18 '''A helper function to emulate 2.6+ bytes literals using string
19 19 literals.'''
20 20 return s
21 21
22 22 # Solaris Python packaging brain damage
23 23 try:
24 24 import hashlib
25 25 sha = hashlib.sha1()
26 26 except ImportError:
27 27 try:
28 28 import sha
29 29 except ImportError:
30 30 raise SystemExit(
31 31 "Couldn't import standard hashlib (incomplete Python install).")
32 32
33 33 try:
34 34 import zlib
35 35 except ImportError:
36 36 raise SystemExit(
37 37 "Couldn't import standard zlib (incomplete Python install).")
38 38
39 39 # The base IronPython distribution (as of 2.7.1) doesn't support bz2
40 40 isironpython = False
41 41 try:
42 42 isironpython = (platform.python_implementation()
43 43 .lower().find("ironpython") != -1)
44 44 except AttributeError:
45 45 pass
46 46
47 47 if isironpython:
48 48 sys.stderr.write("warning: IronPython detected (no bz2 support)\n")
49 49 else:
50 50 try:
51 51 import bz2
52 52 except ImportError:
53 53 raise SystemExit(
54 54 "Couldn't import standard bz2 (incomplete Python install).")
55 55
56 56 import os, subprocess, time
57 57 import shutil
58 58 import tempfile
59 59 from distutils import log
60 60 from distutils.core import setup, Command, Extension
61 61 from distutils.dist import Distribution
62 62 from distutils.command.build import build
63 63 from distutils.command.build_ext import build_ext
64 64 from distutils.command.build_py import build_py
65 65 from distutils.command.install_scripts import install_scripts
66 66 from distutils.spawn import spawn, find_executable
67 67 from distutils.ccompiler import new_compiler
68 68 from distutils import cygwinccompiler
69 69 from distutils.errors import CCompilerError, DistutilsExecError
70 70 from distutils.sysconfig import get_python_inc
71 71 from distutils.version import StrictVersion
72 72
73 73 convert2to3 = '--c2to3' in sys.argv
74 74 if convert2to3:
75 75 try:
76 76 from distutils.command.build_py import build_py_2to3 as build_py
77 77 from lib2to3.refactor import get_fixers_from_package as getfixers
78 78 except ImportError:
79 79 if sys.version_info[0] < 3:
80 80 raise SystemExit("--c2to3 is only compatible with python3.")
81 81 raise
82 82 sys.path.append('contrib')
83 83 elif sys.version_info[0] >= 3:
84 84 raise SystemExit("setup.py with python3 needs --c2to3 (experimental)")
85 85
86 86 scripts = ['hg']
87 87 if os.name == 'nt':
88 88 scripts.append('contrib/win32/hg.bat')
89 89
90 90 # simplified version of distutils.ccompiler.CCompiler.has_function
91 91 # that actually removes its temporary files.
92 92 def hasfunction(cc, funcname):
93 93 tmpdir = tempfile.mkdtemp(prefix='hg-install-')
94 94 devnull = oldstderr = None
95 95 try:
96 96 try:
97 97 fname = os.path.join(tmpdir, 'funcname.c')
98 98 f = open(fname, 'w')
99 99 f.write('int main(void) {\n')
100 100 f.write(' %s();\n' % funcname)
101 101 f.write('}\n')
102 102 f.close()
103 103 # Redirect stderr to /dev/null to hide any error messages
104 104 # from the compiler.
105 105 # This will have to be changed if we ever have to check
106 106 # for a function on Windows.
107 107 devnull = open('/dev/null', 'w')
108 108 oldstderr = os.dup(sys.stderr.fileno())
109 109 os.dup2(devnull.fileno(), sys.stderr.fileno())
110 110 objects = cc.compile([fname], output_dir=tmpdir)
111 111 cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
112 112 except Exception:
113 113 return False
114 114 return True
115 115 finally:
116 116 if oldstderr is not None:
117 117 os.dup2(oldstderr, sys.stderr.fileno())
118 118 if devnull is not None:
119 119 devnull.close()
120 120 shutil.rmtree(tmpdir)
121 121
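A minimal sketch (not part of this change) of how the hasfunction() helper above could be exercised if dropped into this file; it assumes a working C compiler on a POSIX host:

from distutils.ccompiler import new_compiler

cc = new_compiler()
print hasfunction(cc, 'strlen')                 # expected True on most systems
print hasfunction(cc, 'no_such_function_xyz')   # expected False (link fails)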
122 122 # py2exe needs to be installed to work
123 123 try:
124 124 import py2exe
125 125 py2exeloaded = True
126 126 # import py2exe's patched Distribution class
127 127 from distutils.core import Distribution
128 128 except ImportError:
129 129 py2exeloaded = False
130 130
131 131 def runcmd(cmd, env):
132 132 if sys.platform == 'plan9':
133 133 # subprocess kludge to work around issues in half-baked Python
134 134 # ports, notably bichued/python:
135 135 _, out, err = os.popen3(cmd)
136 136 return str(out), str(err)
137 137 else:
138 138 p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
139 139 stderr=subprocess.PIPE, env=env)
140 140 out, err = p.communicate()
141 141 return out, err
142 142
143 143 def runhg(cmd, env):
144 144 out, err = runcmd(cmd, env)
145 145 # If root is executing setup.py, but the repository is owned by
146 146 # another user (as in "sudo python setup.py install") we will get
147 147 # trust warnings since the .hg/hgrc file is untrusted. That is
148 148 # fine, we don't want to load it anyway. Python may warn about
149 149 # a missing __init__.py in mercurial/locale, which we also ignore.
150 150 err = [e for e in err.splitlines()
151 151 if not e.startswith(b('not trusting file')) \
152 152 and not e.startswith(b('warning: Not importing')) \
153 153 and not e.startswith(b('obsolete feature not enabled'))]
154 154 if err:
155 155 print >> sys.stderr, "stderr from '%s':" % (' '.join(cmd))
156 156 print >> sys.stderr, '\n'.join([' ' + e for e in err])
157 157 return ''
158 158 return out
159 159
160 160 version = ''
161 161
162 162 # Execute hg out of this directory with a custom environment which
163 163 # includes the pure Python modules in mercurial/pure. We also take
164 164 # care to not use any hgrc files and do no localization.
165 165 pypath = ['mercurial', os.path.join('mercurial', 'pure')]
166 166 env = {'PYTHONPATH': os.pathsep.join(pypath),
167 167 'HGRCPATH': '',
168 168 'LANGUAGE': 'C'}
169 169 if 'LD_LIBRARY_PATH' in os.environ:
170 170 env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
171 171 if 'SystemRoot' in os.environ:
172 172 # Copy SystemRoot into the custom environment for Python 2.6
173 173 # under Windows. Otherwise, the subprocess will fail with
174 174 # error 0xc0150004. See: http://bugs.python.org/issue3440
175 175 env['SystemRoot'] = os.environ['SystemRoot']
176 176
177 177 if os.path.isdir('.hg'):
178 178 cmd = [sys.executable, 'hg', 'log', '-r', '.', '--template', '{tags}\n']
179 179 numerictags = [t for t in runhg(cmd, env).split() if t[0].isdigit()]
180 180 hgid = runhg([sys.executable, 'hg', 'id', '-i'], env).strip()
181 181 if numerictags: # tag(s) found
182 182 version = numerictags[-1]
183 183 if hgid.endswith('+'): # propagate the dirty status to the tag
184 184 version += '+'
185 185 else: # no tag found
186 186 cmd = [sys.executable, 'hg', 'parents', '--template',
187 187 '{latesttag}+{latesttagdistance}-']
188 188 version = runhg(cmd, env) + hgid
189 189 if version.endswith('+'):
190 190 version += time.strftime('%Y%m%d')
191 191 elif os.path.exists('.hg_archival.txt'):
192 192 kw = dict([[t.strip() for t in l.split(':', 1)]
193 193 for l in open('.hg_archival.txt')])
194 194 if 'tag' in kw:
195 195 version = kw['tag']
196 196 elif 'latesttag' in kw:
197 197 version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
198 198 else:
199 199 version = kw.get('node', '')[:12]
200 200
201 201 if version:
202 202 f = open("mercurial/__version__.py", "w")
203 203 f.write('# this file is autogenerated by setup.py\n')
204 204 f.write('version = "%s"\n' % version)
205 205 f.close()
206 206
207 207
208 208 try:
209 209 from mercurial import __version__
210 210 version = __version__.version
211 211 except ImportError:
212 212 version = 'unknown'
213 213
214 214 class hgbuild(build):
215 215 # Insert hgbuildmo first so that files in mercurial/locale/ are found
216 216 # when build_py is run next.
217 217 sub_commands = [('build_mo', None),
218 218
219 219 # We also need build_ext before build_py. Otherwise, when 2to3 is
220 220 # called (in build_py), it will not find osutil & friends,
221 221 # thinking that those modules are global and, consequently, making
222 222 # a mess, now that all module imports are global.
223 223
224 224 ('build_ext', build.has_ext_modules),
225 225 ] + build.sub_commands
226 226
227 227 class hgbuildmo(build):
228 228
229 229 description = "build translations (.mo files)"
230 230
231 231 def run(self):
232 232 if not find_executable('msgfmt'):
233 233 self.warn("could not find msgfmt executable, no translations "
234 234 "will be built")
235 235 return
236 236
237 237 podir = 'i18n'
238 238 if not os.path.isdir(podir):
239 239 self.warn("could not find %s/ directory" % podir)
240 240 return
241 241
242 242 join = os.path.join
243 243 for po in os.listdir(podir):
244 244 if not po.endswith('.po'):
245 245 continue
246 246 pofile = join(podir, po)
247 247 modir = join('locale', po[:-3], 'LC_MESSAGES')
248 248 mofile = join(modir, 'hg.mo')
249 249 mobuildfile = join('mercurial', mofile)
250 250 cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile]
251 251 if sys.platform != 'sunos5':
252 252 # msgfmt on Solaris does not know about -c
253 253 cmd.append('-c')
254 254 self.mkpath(join('mercurial', modir))
255 255 self.make_file([pofile], mobuildfile, spawn, (cmd,))
256 256
257 257
258 258 class hgdist(Distribution):
259 259 pure = 0
260 260
261 261 global_options = Distribution.global_options + \
262 262 [('pure', None, "use pure (slow) Python "
263 263 "code instead of C extensions"),
264 264 ('c2to3', None, "(experimental!) convert "
265 265 "code with 2to3"),
266 266 ]
267 267
268 268 def has_ext_modules(self):
269 269 # self.ext_modules is emptied in hgbuildpy.finalize_options which is
270 270 # too late for some cases
271 271 return not self.pure and Distribution.has_ext_modules(self)
272 272
273 273 class hgbuildext(build_ext):
274 274
275 275 def build_extension(self, ext):
276 276 try:
277 277 build_ext.build_extension(self, ext)
278 278 except CCompilerError:
279 279 if not getattr(ext, 'optional', False):
280 280 raise
281 281 log.warn("Failed to build optional extension '%s' (skipping)",
282 282 ext.name)
283 283
284 284 class hgbuildpy(build_py):
285 285 if convert2to3:
286 286 fixer_names = sorted(set(getfixers("lib2to3.fixes") +
287 287 getfixers("hgfixes")))
288 288
289 289 def finalize_options(self):
290 290 build_py.finalize_options(self)
291 291
292 292 if self.distribution.pure:
293 293 if self.py_modules is None:
294 294 self.py_modules = []
295 295 for ext in self.distribution.ext_modules:
296 296 if ext.name.startswith("mercurial."):
297 297 self.py_modules.append("mercurial.pure.%s" % ext.name[10:])
298 298 self.distribution.ext_modules = []
299 299 else:
300 300 if not os.path.exists(os.path.join(get_python_inc(), 'Python.h')):
301 301 raise SystemExit('Python headers are required to build '
302 302 'Mercurial')
303 303
304 304 def find_modules(self):
305 305 modules = build_py.find_modules(self)
306 306 for module in modules:
307 307 if module[0] == "mercurial.pure":
308 308 if module[1] != "__init__":
309 309 yield ("mercurial", module[1], module[2])
310 310 else:
311 311 yield module
312 312
313 313 class buildhgextindex(Command):
314 314 description = 'generate prebuilt index of hgext (for frozen package)'
315 315 user_options = []
316 316 _indexfilename = 'hgext/__index__.py'
317 317
318 318 def initialize_options(self):
319 319 pass
320 320
321 321 def finalize_options(self):
322 322 pass
323 323
324 324 def run(self):
325 325 if os.path.exists(self._indexfilename):
326 326 os.unlink(self._indexfilename)
327 327
328 328 # no extensions are enabled here, so disabled() lists everything
329 329 code = ('import pprint; from mercurial import extensions; '
330 330 'pprint.pprint(extensions.disabled())')
331 331 out, err = runcmd([sys.executable, '-c', code], env)
332 332 if err:
333 333 raise DistutilsExecError(err)
334 334
335 335 f = open(self._indexfilename, 'w')
336 336 f.write('# this file is autogenerated by setup.py\n')
337 337 f.write('docs = ')
338 338 f.write(out)
339 339 f.close()
340 340
341 341 class buildhgexe(build_ext):
342 342 description = 'compile hg.exe from mercurial/exewrapper.c'
343 343
344 344 def build_extensions(self):
345 345 if os.name != 'nt':
346 346 return
347 347 if isinstance(self.compiler, HackedMingw32CCompiler):
348 348 self.compiler.compiler_so = self.compiler.compiler # no -mdll
349 349 self.compiler.dll_libraries = [] # no -lmsrvc90
350 350 hv = sys.hexversion
351 351 pythonlib = 'python%d%d' % (hv >> 24, (hv >> 16) & 0xff)
352 352 f = open('mercurial/hgpythonlib.h', 'wb')
353 353 f.write('/* this file is autogenerated by setup.py */\n')
354 354 f.write('#define HGPYTHONLIB "%s"\n' % pythonlib)
355 355 f.close()
356 356 objects = self.compiler.compile(['mercurial/exewrapper.c'],
357 357 output_dir=self.build_temp)
358 358 dir = os.path.dirname(self.get_ext_fullpath('dummy'))
359 359 target = os.path.join(dir, 'hg')
360 360 self.compiler.link_executable(objects, target,
361 361 libraries=[],
362 362 output_dir=self.build_temp)
363 363
364 364 class hginstallscripts(install_scripts):
365 365 '''
366 366 This is a specialization of install_scripts that replaces the @LIBDIR@ with
367 367 the configured directory for modules. If possible, the path is made relative
368 368 to the directory for scripts.
369 369 '''
370 370
371 371 def initialize_options(self):
372 372 install_scripts.initialize_options(self)
373 373
374 374 self.install_lib = None
375 375
376 376 def finalize_options(self):
377 377 install_scripts.finalize_options(self)
378 378 self.set_undefined_options('install',
379 379 ('install_lib', 'install_lib'))
380 380
381 381 def run(self):
382 382 install_scripts.run(self)
383 383
384 384 if (os.path.splitdrive(self.install_dir)[0] !=
385 385 os.path.splitdrive(self.install_lib)[0]):
386 386 # can't make relative paths from one drive to another, so use an
387 387 # absolute path instead
388 388 libdir = self.install_lib
389 389 else:
390 390 common = os.path.commonprefix((self.install_dir, self.install_lib))
391 391 rest = self.install_dir[len(common):]
392 392 uplevel = len([n for n in os.path.split(rest) if n])
393 393
394 394 libdir = uplevel * ('..' + os.sep) + self.install_lib[len(common):]
395 395
396 396 for outfile in self.outfiles:
397 397 fp = open(outfile, 'rb')
398 398 data = fp.read()
399 399 fp.close()
400 400
401 401 # skip binary files
402 402 if b('\0') in data:
403 403 continue
404 404
405 405 data = data.replace('@LIBDIR@', libdir.encode('string_escape'))
406 406 fp = open(outfile, 'wb')
407 407 fp.write(data)
408 408 fp.close()
409 409
410 410 cmdclass = {'build': hgbuild,
411 411 'build_mo': hgbuildmo,
412 412 'build_ext': hgbuildext,
413 413 'build_py': hgbuildpy,
414 414 'build_hgextindex': buildhgextindex,
415 415 'install_scripts': hginstallscripts,
416 416 'build_hgexe': buildhgexe,
417 417 }
418 418
419 419 packages = ['mercurial', 'mercurial.hgweb', 'mercurial.httpclient',
420 420 'hgext', 'hgext.convert', 'hgext.highlight', 'hgext.zeroconf',
421 421 'hgext.largefiles']
422 422
423 423 pymodules = []
424 424
425 425 extmodules = [
426 426 Extension('mercurial.base85', ['mercurial/base85.c']),
427 427 Extension('mercurial.bdiff', ['mercurial/bdiff.c']),
428 428 Extension('mercurial.diffhelpers', ['mercurial/diffhelpers.c']),
429 429 Extension('mercurial.mpatch', ['mercurial/mpatch.c']),
430 Extension('mercurial.parsers', ['mercurial/parsers.c',
430 Extension('mercurial.parsers', ['mercurial/dirs.c',
431 'mercurial/parsers.c',
431 432 'mercurial/pathencode.c']),
432 433 ]
433 434
434 435 osutil_ldflags = []
435 436
436 437 if sys.platform == 'darwin':
437 438 osutil_ldflags += ['-framework', 'ApplicationServices']
438 439
439 440 # disable osutil.c under windows + python 2.4 (issue1364)
440 441 if sys.platform == 'win32' and sys.version_info < (2, 5, 0, 'final'):
441 442 pymodules.append('mercurial.pure.osutil')
442 443 else:
443 444 extmodules.append(Extension('mercurial.osutil', ['mercurial/osutil.c'],
444 445 extra_link_args=osutil_ldflags))
445 446
446 447 # the -mno-cygwin option has been deprecated for years
447 448 Mingw32CCompiler = cygwinccompiler.Mingw32CCompiler
448 449
449 450 class HackedMingw32CCompiler(cygwinccompiler.Mingw32CCompiler):
450 451 def __init__(self, *args, **kwargs):
451 452 Mingw32CCompiler.__init__(self, *args, **kwargs)
452 453 for i in 'compiler compiler_so linker_exe linker_so'.split():
453 454 try:
454 455 getattr(self, i).remove('-mno-cygwin')
455 456 except ValueError:
456 457 pass
457 458
458 459 cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler
459 460
460 461 if sys.platform.startswith('linux') and os.uname()[2] > '2.6':
461 462 # The inotify extension is only usable with Linux 2.6 kernels.
462 463 # You also need a reasonably recent C library.
463 464 # In any case, if it fails to build the error will be skipped ('optional').
464 465 cc = new_compiler()
465 466 if hasfunction(cc, 'inotify_add_watch'):
466 467 inotify = Extension('hgext.inotify.linux._inotify',
467 468 ['hgext/inotify/linux/_inotify.c'],
468 469 ['mercurial'])
469 470 inotify.optional = True
470 471 extmodules.append(inotify)
471 472 packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
472 473
473 474 packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo',
474 475 'help/*.txt']}
475 476
476 477 def ordinarypath(p):
477 478 return p and p[0] != '.' and p[-1] != '~'
478 479
479 480 for root in ('templates',):
480 481 for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
481 482 curdir = curdir.split(os.sep, 1)[1]
482 483 dirs[:] = filter(ordinarypath, dirs)
483 484 for f in filter(ordinarypath, files):
484 485 f = os.path.join(curdir, f)
485 486 packagedata['mercurial'].append(f)
486 487
487 488 datafiles = []
488 489 setupversion = version
489 490 extra = {}
490 491
491 492 if py2exeloaded:
492 493 extra['console'] = [
493 494 {'script':'hg',
494 495 'copyright':'Copyright (C) 2005-2010 Matt Mackall and others',
495 496 'product_version':version}]
496 497 # sub command of 'build' because 'py2exe' does not handle sub_commands
497 498 build.sub_commands.insert(0, ('build_hgextindex', None))
498 499
499 500 if os.name == 'nt':
500 501 # Windows binary file versions for exe/dll files must have the
501 502 # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
502 503 setupversion = version.split('+', 1)[0]
503 504
504 505 if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
505 506 # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
506 507 # distutils.sysconfig
507 508 version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[0].splitlines()
508 509 if version:
509 510 version = version[0]
510 511 xcode4 = (version.startswith('Xcode') and
511 512 StrictVersion(version.split()[1]) >= StrictVersion('4.0'))
512 513 else:
513 514 # xcodebuild returns empty on OS X Lion when XCode 4.3 is not
514 515 # installed and only the command-line tools are present. Assume
515 516 # that only happens on >= Lion, thus no PPC support.
516 517 xcode4 = True
517 518
518 519 if xcode4:
519 520 os.environ['ARCHFLAGS'] = ''
520 521
521 522 setup(name='mercurial',
522 523 version=setupversion,
523 524 author='Matt Mackall and many others',
524 525 author_email='mercurial@selenic.com',
525 526 url='http://mercurial.selenic.com/',
526 527 download_url='http://mercurial.selenic.com/release/',
527 528 description=('Fast scalable distributed SCM (revision control, version '
528 529 'control) system'),
529 530 long_description=('Mercurial is a distributed SCM tool written in Python.'
530 531 ' It is used by a number of large projects that require'
531 532 ' fast, reliable distributed revision control, such as '
532 533 'Mozilla.'),
533 534 license='GNU GPLv2 or any later version',
534 535 classifiers=[
535 536 'Development Status :: 6 - Mature',
536 537 'Environment :: Console',
537 538 'Intended Audience :: Developers',
538 539 'Intended Audience :: System Administrators',
539 540 'License :: OSI Approved :: GNU General Public License (GPL)',
540 541 'Natural Language :: Danish',
541 542 'Natural Language :: English',
542 543 'Natural Language :: German',
543 544 'Natural Language :: Italian',
544 545 'Natural Language :: Japanese',
545 546 'Natural Language :: Portuguese (Brazilian)',
546 547 'Operating System :: Microsoft :: Windows',
547 548 'Operating System :: OS Independent',
548 549 'Operating System :: POSIX',
549 550 'Programming Language :: C',
550 551 'Programming Language :: Python',
551 552 'Topic :: Software Development :: Version Control',
552 553 ],
553 554 scripts=scripts,
554 555 packages=packages,
555 556 py_modules=pymodules,
556 557 ext_modules=extmodules,
557 558 data_files=datafiles,
558 559 package_data=packagedata,
559 560 cmdclass=cmdclass,
560 561 distclass=hgdist,
561 562 options=dict(py2exe=dict(packages=['hgext', 'email']),
562 563 bdist_mpkg=dict(zipdist=True,
563 564 license='COPYING',
564 565 readme='contrib/macosx/Readme.html',
565 566 welcome='contrib/macosx/Welcome.html')),
566 567 **extra)