tab/space cleanup
Thomas Arendsen Hein
r6334:7016f7fb default
@@ -1,608 +1,608 @@
1 1 /*
2 2 * _inotify.c - Python extension interfacing to the Linux inotify subsystem
3 3 *
4 4 * Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
5 5 *
6 6 * This library is free software; you can redistribute it and/or
7 7 * modify it under the terms of version 2.1 of the GNU Lesser General
8 8 * Public License, incorporated herein by reference.
9 9 */
10 10
11 11 #include <Python.h>
12 12 #include <alloca.h>
13 13 #include <sys/inotify.h>
14 14 #include <stdint.h>
15 15 #include <sys/ioctl.h>
16 16 #include <unistd.h>
17 17
18 18 static PyObject *init(PyObject *self, PyObject *args)
19 19 {
20 20 PyObject *ret = NULL;
21 21 int fd = -1;
22
22
23 23 if (!PyArg_ParseTuple(args, ":init"))
24 24 goto bail;
25
25
26 26 Py_BEGIN_ALLOW_THREADS
27 27 fd = inotify_init();
28 28 Py_END_ALLOW_THREADS
29 29
30 30 if (fd == -1) {
31 31 PyErr_SetFromErrno(PyExc_OSError);
32 32 goto bail;
33 33 }
34
34
35 35 ret = PyInt_FromLong(fd);
36 36 if (ret == NULL)
37 37 goto bail;
38 38
39 39 goto done;
40
40
41 41 bail:
42 42 if (fd != -1)
43 43 close(fd);
44 44
45 45 Py_CLEAR(ret);
46
46
47 47 done:
48 48 return ret;
49 49 }
50 50
51 51 PyDoc_STRVAR(
52 52 init_doc,
53 53 "init() -> fd\n"
54 54 "\n"
55 55 "Initialise an inotify instance.\n"
56 56 "Return a file descriptor associated with a new inotify event queue.");
57 57
58 58 static PyObject *add_watch(PyObject *self, PyObject *args)
59 59 {
60 60 PyObject *ret = NULL;
61 61 uint32_t mask;
62 62 int wd = -1;
63 63 char *path;
64 64 int fd;
65 65
66 66 if (!PyArg_ParseTuple(args, "isI:add_watch", &fd, &path, &mask))
67 67 goto bail;
68 68
69 69 Py_BEGIN_ALLOW_THREADS
70 70 wd = inotify_add_watch(fd, path, mask);
71 71 Py_END_ALLOW_THREADS
72 72
73 73 if (wd == -1) {
74 74 PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
75 75 goto bail;
76 76 }
77
77
78 78 ret = PyInt_FromLong(wd);
79 79 if (ret == NULL)
80 80 goto bail;
81
81
82 82 goto done;
83
83
84 84 bail:
85 85 if (wd != -1)
86 86 inotify_rm_watch(fd, wd);
87
87
88 88 Py_CLEAR(ret);
89 89
90 90 done:
91 91 return ret;
92 92 }
93 93
94 94 PyDoc_STRVAR(
95 95 add_watch_doc,
96 96 "add_watch(fd, path, mask) -> wd\n"
97 97 "\n"
98 98 "Add a watch to an inotify instance, or modify an existing watch.\n"
99 99 "\n"
100 100 " fd: file descriptor returned by init()\n"
101 101 " path: path to watch\n"
102 102 " mask: mask of events to watch for\n"
103 103 "\n"
104 104 "Return a unique numeric watch descriptor for the inotify instance\n"
105 105 "mapped by the file descriptor.");
106 106
107 107 static PyObject *remove_watch(PyObject *self, PyObject *args)
108 108 {
109 109 PyObject *ret = NULL;
110 110 uint32_t wd;
111 111 int fd;
112 112 int r;
113
113
114 114 if (!PyArg_ParseTuple(args, "iI:remove_watch", &fd, &wd))
115 115 goto bail;
116 116
117 117 Py_BEGIN_ALLOW_THREADS
118 118 r = inotify_rm_watch(fd, wd);
119 119 Py_END_ALLOW_THREADS
120 120
121 121 if (r == -1) {
122 122 PyErr_SetFromErrno(PyExc_OSError);
123 123 goto bail;
124 124 }
125
125
126 126 Py_INCREF(Py_None);
127
127
128 128 goto done;
129
129
130 130 bail:
131 131 Py_CLEAR(ret);
132
132
133 133 done:
134 134 return ret;
135 135 }
136 136
137 137 PyDoc_STRVAR(
138 138 remove_watch_doc,
139 139 "remove_watch(fd, wd)\n"
140 140 "\n"
141 141 " fd: file descriptor returned by init()\n"
142 142 " wd: watch descriptor returned by add_watch()\n"
143 143 "\n"
144 144 "Remove a watch associated with the watch descriptor wd from the\n"
145 145 "inotify instance associated with the file descriptor fd.\n"
146 146 "\n"
147 147 "Removing a watch causes an IN_IGNORED event to be generated for this\n"
148 148 "watch descriptor.");
149 149
150 150 #define bit_name(x) {x, #x}
151 151
152 152 static struct {
153 153 int bit;
154 154 const char *name;
155 155 PyObject *pyname;
156 156 } bit_names[] = {
157 157 bit_name(IN_ACCESS),
158 158 bit_name(IN_MODIFY),
159 159 bit_name(IN_ATTRIB),
160 160 bit_name(IN_CLOSE_WRITE),
161 161 bit_name(IN_CLOSE_NOWRITE),
162 162 bit_name(IN_OPEN),
163 163 bit_name(IN_MOVED_FROM),
164 164 bit_name(IN_MOVED_TO),
165 165 bit_name(IN_CREATE),
166 166 bit_name(IN_DELETE),
167 167 bit_name(IN_DELETE_SELF),
168 168 bit_name(IN_MOVE_SELF),
169 169 bit_name(IN_UNMOUNT),
170 170 bit_name(IN_Q_OVERFLOW),
171 171 bit_name(IN_IGNORED),
172 172 bit_name(IN_ONLYDIR),
173 173 bit_name(IN_DONT_FOLLOW),
174 174 bit_name(IN_MASK_ADD),
175 175 bit_name(IN_ISDIR),
176 176 bit_name(IN_ONESHOT),
177 177 {0}
178 178 };
179 179
180 180 static PyObject *decode_mask(int mask)
181 181 {
182 182 PyObject *ret = PyList_New(0);
183 183 int i;
184 184
185 185 if (ret == NULL)
186 186 goto bail;
187
187
188 188 for (i = 0; bit_names[i].bit; i++) {
189 189 if (mask & bit_names[i].bit) {
190 190 if (bit_names[i].pyname == NULL) {
191 191 bit_names[i].pyname = PyString_FromString(bit_names[i].name);
192 192 if (bit_names[i].pyname == NULL)
193 193 goto bail;
194 194 }
195 195 Py_INCREF(bit_names[i].pyname);
196 196 if (PyList_Append(ret, bit_names[i].pyname) == -1)
197 197 goto bail;
198 198 }
199 199 }
200
200
201 201 goto done;
202
202
203 203 bail:
204 204 Py_CLEAR(ret);
205 205
206 206 done:
207 207 return ret;
208 208 }
209
209
210 210 static PyObject *pydecode_mask(PyObject *self, PyObject *args)
211 211 {
212 212 int mask;
213
213
214 214 if (!PyArg_ParseTuple(args, "i:decode_mask", &mask))
215 215 return NULL;
216 216
217 217 return decode_mask(mask);
218 218 }
219
219
220 220 PyDoc_STRVAR(
221 221 decode_mask_doc,
222 222 "decode_mask(mask) -> list_of_strings\n"
223 223 "\n"
224 224 "Decode an inotify mask value into a list of strings that give the\n"
225 225 "name of each bit set in the mask.");
226 226
227 227 static char doc[] = "Low-level inotify interface wrappers.";
228 228
229 229 static void define_const(PyObject *dict, const char *name, uint32_t val)
230 230 {
231 231 PyObject *pyval = PyInt_FromLong(val);
232 232 PyObject *pyname = PyString_FromString(name);
233 233
234 234 if (!pyname || !pyval)
235 235 goto bail;
236
236
237 237 PyDict_SetItem(dict, pyname, pyval);
238 238
239 239 bail:
240 240 Py_XDECREF(pyname);
241 241 Py_XDECREF(pyval);
242 242 }
243 243
244 244 static void define_consts(PyObject *dict)
245 245 {
246 246 define_const(dict, "IN_ACCESS", IN_ACCESS);
247 247 define_const(dict, "IN_MODIFY", IN_MODIFY);
248 248 define_const(dict, "IN_ATTRIB", IN_ATTRIB);
249 249 define_const(dict, "IN_CLOSE_WRITE", IN_CLOSE_WRITE);
250 250 define_const(dict, "IN_CLOSE_NOWRITE", IN_CLOSE_NOWRITE);
251 251 define_const(dict, "IN_OPEN", IN_OPEN);
252 252 define_const(dict, "IN_MOVED_FROM", IN_MOVED_FROM);
253 253 define_const(dict, "IN_MOVED_TO", IN_MOVED_TO);
254 254
255 255 define_const(dict, "IN_CLOSE", IN_CLOSE);
256 256 define_const(dict, "IN_MOVE", IN_MOVE);
257 257
258 258 define_const(dict, "IN_CREATE", IN_CREATE);
259 259 define_const(dict, "IN_DELETE", IN_DELETE);
260 260 define_const(dict, "IN_DELETE_SELF", IN_DELETE_SELF);
261 261 define_const(dict, "IN_MOVE_SELF", IN_MOVE_SELF);
262 262 define_const(dict, "IN_UNMOUNT", IN_UNMOUNT);
263 263 define_const(dict, "IN_Q_OVERFLOW", IN_Q_OVERFLOW);
264 264 define_const(dict, "IN_IGNORED", IN_IGNORED);
265 265
266 266 define_const(dict, "IN_ONLYDIR", IN_ONLYDIR);
267 267 define_const(dict, "IN_DONT_FOLLOW", IN_DONT_FOLLOW);
268 268 define_const(dict, "IN_MASK_ADD", IN_MASK_ADD);
269 269 define_const(dict, "IN_ISDIR", IN_ISDIR);
270 270 define_const(dict, "IN_ONESHOT", IN_ONESHOT);
271 271 define_const(dict, "IN_ALL_EVENTS", IN_ALL_EVENTS);
272 272 }
273 273
274 274 struct event {
275 275 PyObject_HEAD
276 276 PyObject *wd;
277 277 PyObject *mask;
278 278 PyObject *cookie;
279 279 PyObject *name;
280 280 };
281
281
282 282 static PyObject *event_wd(PyObject *self, void *x)
283 283 {
284 284 struct event *evt = (struct event *) self;
285 285 Py_INCREF(evt->wd);
286 286 return evt->wd;
287 287 }
288
288
289 289 static PyObject *event_mask(PyObject *self, void *x)
290 290 {
291 291 struct event *evt = (struct event *) self;
292 292 Py_INCREF(evt->mask);
293 293 return evt->mask;
294 294 }
295
295
296 296 static PyObject *event_cookie(PyObject *self, void *x)
297 297 {
298 298 struct event *evt = (struct event *) self;
299 299 Py_INCREF(evt->cookie);
300 300 return evt->cookie;
301 301 }
302
302
303 303 static PyObject *event_name(PyObject *self, void *x)
304 304 {
305 305 struct event *evt = (struct event *) self;
306 306 Py_INCREF(evt->name);
307 307 return evt->name;
308 308 }
309 309
310 310 static struct PyGetSetDef event_getsets[] = {
311 311 {"wd", event_wd, NULL,
312 312 "watch descriptor"},
313 313 {"mask", event_mask, NULL,
314 314 "event mask"},
315 315 {"cookie", event_cookie, NULL,
316 316 "rename cookie, if rename-related event"},
317 317 {"name", event_name, NULL,
318 318 "file name"},
319 319 {NULL}
320 320 };
321 321
322 322 PyDoc_STRVAR(
323 323 event_doc,
324 324 "event: Structure describing an inotify event.");
325 325
326 326 static PyObject *event_new(PyTypeObject *t, PyObject *a, PyObject *k)
327 327 {
328 328 return (*t->tp_alloc)(t, 0);
329 329 }
330 330
331 331 static void event_dealloc(struct event *evt)
332 332 {
333 333 Py_XDECREF(evt->wd);
334 334 Py_XDECREF(evt->mask);
335 335 Py_XDECREF(evt->cookie);
336 336 Py_XDECREF(evt->name);
337
337
338 338 (*evt->ob_type->tp_free)(evt);
339 339 }
340 340
341 341 static PyObject *event_repr(struct event *evt)
342 342 {
343 343 int wd = PyInt_AsLong(evt->wd);
344 344 int cookie = evt->cookie == Py_None ? -1 : PyInt_AsLong(evt->cookie);
345 345 PyObject *ret = NULL, *pymasks = NULL, *pymask = NULL;
346 346 PyObject *join = NULL;
347 347 char *maskstr;
348 348
349 349 join = PyString_FromString("|");
350 350 if (join == NULL)
351 351 goto bail;
352 352
353 353 pymasks = decode_mask(PyInt_AsLong(evt->mask));
354 354 if (pymasks == NULL)
355 355 goto bail;
356
356
357 357 pymask = _PyString_Join(join, pymasks);
358 358 if (pymask == NULL)
359 359 goto bail;
360
360
361 361 maskstr = PyString_AsString(pymask);
362
362
363 363 if (evt->name != Py_None) {
364 364 PyObject *pyname = PyString_Repr(evt->name, 1);
365 365 char *name = pyname ? PyString_AsString(pyname) : "???";
366
366
367 367 if (cookie == -1)
368 368 ret = PyString_FromFormat("event(wd=%d, mask=%s, name=%s)",
369 369 wd, maskstr, name);
370 370 else
371 371 ret = PyString_FromFormat("event(wd=%d, mask=%s, "
372 372 "cookie=0x%x, name=%s)",
373 373 wd, maskstr, cookie, name);
374 374
375 375 Py_XDECREF(pyname);
376 376 } else {
377 377 if (cookie == -1)
378 378 ret = PyString_FromFormat("event(wd=%d, mask=%s)",
379 379 wd, maskstr);
380 380 else {
381 381 ret = PyString_FromFormat("event(wd=%d, mask=%s, cookie=0x%x)",
382 382 wd, maskstr, cookie);
383 383 }
384 384 }
385 385
386 386 goto done;
387 387 bail:
388 388 Py_CLEAR(ret);
389
389
390 390 done:
391 391 Py_XDECREF(pymask);
392 392 Py_XDECREF(pymasks);
393 393 Py_XDECREF(join);
394 394
395 395 return ret;
396 396 }
397 397
398 398 static PyTypeObject event_type = {
399 399 PyObject_HEAD_INIT(NULL)
400 400 0, /*ob_size*/
401 401 "_inotify.event", /*tp_name*/
402 402 sizeof(struct event), /*tp_basicsize*/
403 403 0, /*tp_itemsize*/
404 404 (destructor)event_dealloc, /*tp_dealloc*/
405 405 0, /*tp_print*/
406 406 0, /*tp_getattr*/
407 407 0, /*tp_setattr*/
408 408 0, /*tp_compare*/
409 409 (reprfunc)event_repr, /*tp_repr*/
410 410 0, /*tp_as_number*/
411 411 0, /*tp_as_sequence*/
412 412 0, /*tp_as_mapping*/
413 413 0, /*tp_hash */
414 414 0, /*tp_call*/
415 415 0, /*tp_str*/
416 416 0, /*tp_getattro*/
417 417 0, /*tp_setattro*/
418 418 0, /*tp_as_buffer*/
419 419 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
420 420 event_doc, /* tp_doc */
421 421 0, /* tp_traverse */
422 422 0, /* tp_clear */
423 423 0, /* tp_richcompare */
424 424 0, /* tp_weaklistoffset */
425 425 0, /* tp_iter */
426 426 0, /* tp_iternext */
427 427 0, /* tp_methods */
428 428 0, /* tp_members */
429 429 event_getsets, /* tp_getset */
430 430 0, /* tp_base */
431 431 0, /* tp_dict */
432 432 0, /* tp_descr_get */
433 433 0, /* tp_descr_set */
434 434 0, /* tp_dictoffset */
435 435 0, /* tp_init */
436 436 0, /* tp_alloc */
437 437 event_new, /* tp_new */
438 438 };
439
439
440 440 PyObject *read_events(PyObject *self, PyObject *args)
441 441 {
442 442 PyObject *ctor_args = NULL;
443 443 PyObject *pybufsize = NULL;
444 444 PyObject *ret = NULL;
445 445 int bufsize = 65536;
446 446 char *buf = NULL;
447 447 int nread, pos;
448 448 int fd;
449 449
450 450 if (!PyArg_ParseTuple(args, "i|O:read", &fd, &pybufsize))
451 goto bail;
451 goto bail;
452 452
453 453 if (pybufsize && pybufsize != Py_None)
454 454 bufsize = PyInt_AsLong(pybufsize);
455
455
456 456 ret = PyList_New(0);
457 457 if (ret == NULL)
458 458 goto bail;
459
459
460 460 if (bufsize <= 0) {
461 461 int r;
462
462
463 463 Py_BEGIN_ALLOW_THREADS
464 464 r = ioctl(fd, FIONREAD, &bufsize);
465 465 Py_END_ALLOW_THREADS
466
466
467 467 if (r == -1) {
468 468 PyErr_SetFromErrno(PyExc_OSError);
469 469 goto bail;
470 470 }
471 471 if (bufsize == 0)
472 472 goto done;
473 473 }
474 474 else {
475 475 static long name_max;
476 476 static long name_fd = -1;
477 477 long min;
478
478
479 479 if (name_fd != fd) {
480 480 name_fd = fd;
481 481 Py_BEGIN_ALLOW_THREADS
482 482 name_max = fpathconf(fd, _PC_NAME_MAX);
483 483 Py_END_ALLOW_THREADS
484 484 }
485
485
486 486 min = sizeof(struct inotify_event) + name_max + 1;
487
487
488 488 if (bufsize < min) {
489 489 PyErr_Format(PyExc_ValueError, "bufsize must be at least %d",
490 490 (int) min);
491 491 goto bail;
492 492 }
493 493 }
494 494
495 495 buf = alloca(bufsize);
496
496
497 497 Py_BEGIN_ALLOW_THREADS
498 498 nread = read(fd, buf, bufsize);
499 499 Py_END_ALLOW_THREADS
500 500
501 501 if (nread == -1) {
502 502 PyErr_SetFromErrno(PyExc_OSError);
503 503 goto bail;
504 504 }
505 505
506 506 ctor_args = PyTuple_New(0);
507 507
508 508 if (ctor_args == NULL)
509 509 goto bail;
510
510
511 511 pos = 0;
512
512
513 513 while (pos < nread) {
514 514 struct inotify_event *in = (struct inotify_event *) (buf + pos);
515 515 struct event *evt;
516 516 PyObject *obj;
517 517
518 518 obj = PyObject_CallObject((PyObject *) &event_type, ctor_args);
519 519
520 520 if (obj == NULL)
521 521 goto bail;
522 522
523 523 evt = (struct event *) obj;
524 524
525 525 evt->wd = PyInt_FromLong(in->wd);
526 526 evt->mask = PyInt_FromLong(in->mask);
527 527 if (in->mask & IN_MOVE)
528 528 evt->cookie = PyInt_FromLong(in->cookie);
529 529 else {
530 530 Py_INCREF(Py_None);
531 531 evt->cookie = Py_None;
532 532 }
533 533 if (in->len)
534 534 evt->name = PyString_FromString(in->name);
535 535 else {
536 536 Py_INCREF(Py_None);
537 537 evt->name = Py_None;
538 538 }
539 539
540 540 if (!evt->wd || !evt->mask || !evt->cookie || !evt->name)
541 541 goto mybail;
542 542
543 543 if (PyList_Append(ret, obj) == -1)
544 544 goto mybail;
545 545
546 546 pos += sizeof(struct inotify_event) + in->len;
547 547 continue;
548 548
549 549 mybail:
550 550 Py_CLEAR(evt->wd);
551 551 Py_CLEAR(evt->mask);
552 552 Py_CLEAR(evt->cookie);
553 553 Py_CLEAR(evt->name);
554 554 Py_DECREF(obj);
555 555
556 556 goto bail;
557 557 }
558
558
559 559 goto done;
560 560
561 561 bail:
562 562 Py_CLEAR(ret);
563
563
564 564 done:
565 565 Py_XDECREF(ctor_args);
566 566
567 567 return ret;
568 568 }
569 569
570 570 PyDoc_STRVAR(
571 571 read_doc,
572 572 "read(fd, bufsize[=65536]) -> list_of_events\n"
573 573 "\n"
574 574 "\nRead inotify events from a file descriptor.\n"
575 575 "\n"
576 576 " fd: file descriptor returned by init()\n"
577 577 " bufsize: size of buffer to read into, in bytes\n"
578 578 "\n"
579 579 "Return a list of event objects.\n"
580 580 "\n"
581 581 "If bufsize is > 0, block until events are available to be read.\n"
582 582 "Otherwise, immediately return all events that can be read without\n"
583 583 "blocking.");
584 584
585 585
586 586 static PyMethodDef methods[] = {
587 587 {"init", init, METH_VARARGS, init_doc},
588 588 {"add_watch", add_watch, METH_VARARGS, add_watch_doc},
589 589 {"remove_watch", remove_watch, METH_VARARGS, remove_watch_doc},
590 590 {"read", read_events, METH_VARARGS, read_doc},
591 591 {"decode_mask", pydecode_mask, METH_VARARGS, decode_mask_doc},
592 592 {NULL},
593 593 };
594 594
595 595 void init_inotify(void)
596 596 {
597 597 PyObject *mod, *dict;
598 598
599 599 if (PyType_Ready(&event_type) == -1)
600 return;
600 return;
601 601
602 602 mod = Py_InitModule3("_inotify", methods, doc);
603 603
604 604 dict = PyModule_GetDict(mod);
605
605
606 606 if (dict)
607 607 define_consts(dict);
608 608 }
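
The wrappers above build into a Python 2 extension module; assuming it is importable as _inotify and that '/tmp' is only a stand-in path, the API documented in the PyDoc strings can be exercised like this (a sketch, not part of this changeset):

    import os
    import _inotify

    fd = _inotify.init()                      # new inotify event queue
    wd = _inotify.add_watch(fd, '/tmp', _inotify.IN_ALL_EVENTS)
    try:
        # read() blocks until at least one event is available
        for evt in _inotify.read(fd):
            print evt.wd, _inotify.decode_mask(evt.mask), evt.name
        _inotify.remove_watch(fd, wd)
    finally:
        os.close(fd)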
@@ -1,2133 +1,2133 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __init__(self, parentui, path=None, create=0):
20 20 repo.repository.__init__(self)
21 21 self.root = os.path.realpath(path)
22 22 self.path = os.path.join(self.root, ".hg")
23 23 self.origroot = path
24 24 self.opener = util.opener(self.path)
25 25 self.wopener = util.opener(self.root)
26 26
27 27 if not os.path.isdir(self.path):
28 28 if create:
29 29 if not os.path.exists(path):
30 30 os.mkdir(path)
31 31 os.mkdir(self.path)
32 32 requirements = ["revlogv1"]
33 33 if parentui.configbool('format', 'usestore', True):
34 34 os.mkdir(os.path.join(self.path, "store"))
35 35 requirements.append("store")
36 36 # create an invalid changelog
37 37 self.opener("00changelog.i", "a").write(
38 38 '\0\0\0\2' # represents revlogv2
39 39 ' dummy changelog to prevent using the old repo layout'
40 40 )
41 41 reqfile = self.opener("requires", "w")
42 42 for r in requirements:
43 43 reqfile.write("%s\n" % r)
44 44 reqfile.close()
45 45 else:
46 46 raise repo.RepoError(_("repository %s not found") % path)
47 47 elif create:
48 48 raise repo.RepoError(_("repository %s already exists") % path)
49 49 else:
50 50 # find requirements
51 51 try:
52 52 requirements = self.opener("requires").read().splitlines()
53 53 except IOError, inst:
54 54 if inst.errno != errno.ENOENT:
55 55 raise
56 56 requirements = []
57 57 # check them
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 61
62 62 # setup store
63 63 if "store" in requirements:
64 64 self.encodefn = util.encodefilename
65 65 self.decodefn = util.decodefilename
66 66 self.spath = os.path.join(self.path, "store")
67 67 else:
68 68 self.encodefn = lambda x: x
69 69 self.decodefn = lambda x: x
70 70 self.spath = self.path
71 71
72 72 try:
73 73 # files in .hg/ will be created using this mode
74 74 mode = os.stat(self.spath).st_mode
75 75 # avoid some useless chmods
76 76 if (0777 & ~util._umask) == (0777 & mode):
77 77 mode = None
78 78 except OSError:
79 79 mode = None
80 80
81 81 self._createmode = mode
82 82 self.opener.createmode = mode
83 83 sopener = util.opener(self.spath)
84 84 sopener.createmode = mode
85 85 self.sopener = util.encodedopener(sopener, self.encodefn)
86 86
87 87 self.ui = ui.ui(parentui=parentui)
88 88 try:
89 89 self.ui.readconfig(self.join("hgrc"), self.root)
90 90 extensions.loadall(self.ui)
91 91 except IOError:
92 92 pass
93 93
94 94 self.tagscache = None
95 95 self._tagstypecache = None
96 96 self.branchcache = None
97 97 self._ubranchcache = None # UTF-8 version of branchcache
98 98 self._branchcachetip = None
99 99 self.nodetagscache = None
100 100 self.filterpats = {}
101 101 self._datafilters = {}
102 102 self._transref = self._lockref = self._wlockref = None
103 103
104 104 def __getattr__(self, name):
105 105 if name == 'changelog':
106 106 self.changelog = changelog.changelog(self.sopener)
107 107 self.sopener.defversion = self.changelog.version
108 108 return self.changelog
109 109 if name == 'manifest':
110 110 self.changelog
111 111 self.manifest = manifest.manifest(self.sopener)
112 112 return self.manifest
113 113 if name == 'dirstate':
114 114 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 115 return self.dirstate
116 116 else:
117 117 raise AttributeError, name
118 118
119 119 def url(self):
120 120 return 'file:' + self.root
121 121
122 122 def hook(self, name, throw=False, **args):
123 123 return hook.hook(self.ui, self, name, throw, **args)
124 124
125 125 tag_disallowed = ':\r\n'
126 126
127 127 def _tag(self, names, node, message, local, user, date, parent=None,
128 128 extra={}):
129 129 use_dirstate = parent is None
130 130
131 131 if isinstance(names, str):
132 132 allchars = names
133 133 names = (names,)
134 134 else:
135 135 allchars = ''.join(names)
136 136 for c in self.tag_disallowed:
137 137 if c in allchars:
138 138 raise util.Abort(_('%r cannot be used in a tag name') % c)
139 139
140 140 for name in names:
141 141 self.hook('pretag', throw=True, node=hex(node), tag=name,
142 142 local=local)
143 143
144 144 def writetags(fp, names, munge, prevtags):
145 145 fp.seek(0, 2)
146 146 if prevtags and prevtags[-1] != '\n':
147 147 fp.write('\n')
148 148 for name in names:
149 149 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
150 150 fp.close()
151 151
152 152 prevtags = ''
153 153 if local:
154 154 try:
155 155 fp = self.opener('localtags', 'r+')
156 156 except IOError, err:
157 157 fp = self.opener('localtags', 'a')
158 158 else:
159 159 prevtags = fp.read()
160 160
161 161 # local tags are stored in the current charset
162 162 writetags(fp, names, None, prevtags)
163 163 for name in names:
164 164 self.hook('tag', node=hex(node), tag=name, local=local)
165 165 return
166 166
167 167 if use_dirstate:
168 168 try:
169 169 fp = self.wfile('.hgtags', 'rb+')
170 170 except IOError, err:
171 171 fp = self.wfile('.hgtags', 'ab')
172 172 else:
173 173 prevtags = fp.read()
174 174 else:
175 175 try:
176 176 prevtags = self.filectx('.hgtags', parent).data()
177 177 except revlog.LookupError:
178 178 pass
179 179 fp = self.wfile('.hgtags', 'wb')
180 180 if prevtags:
181 181 fp.write(prevtags)
182 182
183 183 # committed tags are stored in UTF-8
184 184 writetags(fp, names, util.fromlocal, prevtags)
185 185
186 186 if use_dirstate and '.hgtags' not in self.dirstate:
187 187 self.add(['.hgtags'])
188 188
189 189 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
190 190 extra=extra)
191 191
192 192 for name in names:
193 193 self.hook('tag', node=hex(node), tag=name, local=local)
194 194
195 195 return tagnode
196 196
197 197 def tag(self, names, node, message, local, user, date):
198 198 '''tag a revision with one or more symbolic names.
199 199
200 200 names is a list of strings or, when adding a single tag, names may be a
201 201 string.
202
202
203 203 if local is True, the tags are stored in a per-repository file.
204 204 otherwise, they are stored in the .hgtags file, and a new
205 205 changeset is committed with the change.
206 206
207 207 keyword arguments:
208 208
209 209 local: whether to store tags in non-version-controlled file
210 210 (default False)
211 211
212 212 message: commit message to use if committing
213 213
214 214 user: name of user to use if committing
215 215
216 216 date: date tuple to use if committing'''
217 217
218 218 for x in self.status()[:5]:
219 219 if '.hgtags' in x:
220 220 raise util.Abort(_('working copy of .hgtags is changed '
221 221 '(please commit .hgtags manually)'))
222 222
223 223 self._tag(names, node, message, local, user, date)
224 224
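
A hedged illustration of the tag() API documented above; the repo object, node value and user name are hypothetical stand-ins:

    # record a local (uncommitted) tag for a known changeset node
    repo.tag(names='nightly', node=node, message='tagging nightly build',
             local=True, user='buildbot', date=None)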
225 225 def tags(self):
226 226 '''return a mapping of tag to node'''
227 227 if self.tagscache:
228 228 return self.tagscache
229 229
230 230 globaltags = {}
231 231 tagtypes = {}
232 232
233 233 def readtags(lines, fn, tagtype):
234 234 filetags = {}
235 235 count = 0
236 236
237 237 def warn(msg):
238 238 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
239 239
240 240 for l in lines:
241 241 count += 1
242 242 if not l:
243 243 continue
244 244 s = l.split(" ", 1)
245 245 if len(s) != 2:
246 246 warn(_("cannot parse entry"))
247 247 continue
248 248 node, key = s
249 249 key = util.tolocal(key.strip()) # stored in UTF-8
250 250 try:
251 251 bin_n = bin(node)
252 252 except TypeError:
253 253 warn(_("node '%s' is not well formed") % node)
254 254 continue
255 255 if bin_n not in self.changelog.nodemap:
256 256 warn(_("tag '%s' refers to unknown node") % key)
257 257 continue
258 258
259 259 h = []
260 260 if key in filetags:
261 261 n, h = filetags[key]
262 262 h.append(n)
263 263 filetags[key] = (bin_n, h)
264 264
265 265 for k, nh in filetags.items():
266 266 if k not in globaltags:
267 267 globaltags[k] = nh
268 268 tagtypes[k] = tagtype
269 269 continue
270 270
271 271 # we prefer the global tag if:
272 272             # it supersedes us OR
273 273             # mutual supersedes and it has a higher rank
274 274 # otherwise we win because we're tip-most
275 275 an, ah = nh
276 276 bn, bh = globaltags[k]
277 277 if (bn != an and an in bh and
278 278 (bn not in ah or len(bh) > len(ah))):
279 279 an = bn
280 280 ah.extend([n for n in bh if n not in ah])
281 281 globaltags[k] = an, ah
282 282 tagtypes[k] = tagtype
283 283
284 284 # read the tags file from each head, ending with the tip
285 285 f = None
286 286 for rev, node, fnode in self._hgtagsnodes():
287 287 f = (f and f.filectx(fnode) or
288 288 self.filectx('.hgtags', fileid=fnode))
289 289 readtags(f.data().splitlines(), f, "global")
290 290
291 291 try:
292 292 data = util.fromlocal(self.opener("localtags").read())
293 293 # localtags are stored in the local character set
294 294 # while the internal tag table is stored in UTF-8
295 295 readtags(data.splitlines(), "localtags", "local")
296 296 except IOError:
297 297 pass
298 298
299 299 self.tagscache = {}
300 300 self._tagstypecache = {}
301 301 for k,nh in globaltags.items():
302 302 n = nh[0]
303 303 if n != nullid:
304 304 self.tagscache[k] = n
305 305 self._tagstypecache[k] = tagtypes[k]
306 306 self.tagscache['tip'] = self.changelog.tip()
307 307
308 308 return self.tagscache
309 309
310 310 def tagtype(self, tagname):
311 311 '''
312 312 return the type of the given tag. result can be:
313 313
314 314 'local' : a local tag
315 315 'global' : a global tag
316 316 None : tag does not exist
317 317 '''
318 318
319 319 self.tags()
320 320
321 321 return self._tagstypecache.get(tagname)
322 322
323 323 def _hgtagsnodes(self):
324 324 heads = self.heads()
325 325 heads.reverse()
326 326 last = {}
327 327 ret = []
328 328 for node in heads:
329 329 c = self.changectx(node)
330 330 rev = c.rev()
331 331 try:
332 332 fnode = c.filenode('.hgtags')
333 333 except revlog.LookupError:
334 334 continue
335 335 ret.append((rev, node, fnode))
336 336 if fnode in last:
337 337 ret[last[fnode]] = None
338 338 last[fnode] = len(ret) - 1
339 339 return [item for item in ret if item]
340 340
341 341 def tagslist(self):
342 342 '''return a list of tags ordered by revision'''
343 343 l = []
344 344 for t, n in self.tags().items():
345 345 try:
346 346 r = self.changelog.rev(n)
347 347 except:
348 348 r = -2 # sort to the beginning of the list if unknown
349 349 l.append((r, t, n))
350 350 l.sort()
351 351 return [(t, n) for r, t, n in l]
352 352
353 353 def nodetags(self, node):
354 354 '''return the tags associated with a node'''
355 355 if not self.nodetagscache:
356 356 self.nodetagscache = {}
357 357 for t, n in self.tags().items():
358 358 self.nodetagscache.setdefault(n, []).append(t)
359 359 return self.nodetagscache.get(node, [])
360 360
361 361 def _branchtags(self, partial, lrev):
362 362 tiprev = self.changelog.count() - 1
363 363 if lrev != tiprev:
364 364 self._updatebranchcache(partial, lrev+1, tiprev+1)
365 365 self._writebranchcache(partial, self.changelog.tip(), tiprev)
366 366
367 367 return partial
368 368
369 369 def branchtags(self):
370 370 tip = self.changelog.tip()
371 371 if self.branchcache is not None and self._branchcachetip == tip:
372 372 return self.branchcache
373 373
374 374 oldtip = self._branchcachetip
375 375 self._branchcachetip = tip
376 376 if self.branchcache is None:
377 377 self.branchcache = {} # avoid recursion in changectx
378 378 else:
379 379 self.branchcache.clear() # keep using the same dict
380 380 if oldtip is None or oldtip not in self.changelog.nodemap:
381 381 partial, last, lrev = self._readbranchcache()
382 382 else:
383 383 lrev = self.changelog.rev(oldtip)
384 384 partial = self._ubranchcache
385 385
386 386 self._branchtags(partial, lrev)
387 387
388 388 # the branch cache is stored on disk as UTF-8, but in the local
389 389 # charset internally
390 390 for k, v in partial.items():
391 391 self.branchcache[util.tolocal(k)] = v
392 392 self._ubranchcache = partial
393 393 return self.branchcache
394 394
395 395 def _readbranchcache(self):
396 396 partial = {}
397 397 try:
398 398 f = self.opener("branch.cache")
399 399 lines = f.read().split('\n')
400 400 f.close()
401 401 except (IOError, OSError):
402 402 return {}, nullid, nullrev
403 403
404 404 try:
405 405 last, lrev = lines.pop(0).split(" ", 1)
406 406 last, lrev = bin(last), int(lrev)
407 407 if not (lrev < self.changelog.count() and
408 408 self.changelog.node(lrev) == last): # sanity check
409 409 # invalidate the cache
410 410 raise ValueError('invalidating branch cache (tip differs)')
411 411 for l in lines:
412 412 if not l: continue
413 413 node, label = l.split(" ", 1)
414 414 partial[label.strip()] = bin(node)
415 415 except (KeyboardInterrupt, util.SignalInterrupt):
416 416 raise
417 417 except Exception, inst:
418 418 if self.ui.debugflag:
419 419 self.ui.warn(str(inst), '\n')
420 420 partial, last, lrev = {}, nullid, nullrev
421 421 return partial, last, lrev
422 422
423 423 def _writebranchcache(self, branches, tip, tiprev):
424 424 try:
425 425 f = self.opener("branch.cache", "w", atomictemp=True)
426 426 f.write("%s %s\n" % (hex(tip), tiprev))
427 427 for label, node in branches.iteritems():
428 428 f.write("%s %s\n" % (hex(node), label))
429 429 f.rename()
430 430 except (IOError, OSError):
431 431 pass
432 432
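
The cache written by _writebranchcache() above is plain text: the first line stores the tip as '<hex node> <rev>' (used by the sanity check in _readbranchcache()), and every further line maps a branch head node to its label. A made-up example, with the 40-character hashes abbreviated:

    dd2fb8d8d398... 1298
    1c28f494dae6... default
    a7c1559b7bba... stable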
433 433 def _updatebranchcache(self, partial, start, end):
434 434 for r in xrange(start, end):
435 435 c = self.changectx(r)
436 436 b = c.branch()
437 437 partial[b] = c.node()
438 438
439 439 def lookup(self, key):
440 440 if key == '.':
441 441 key, second = self.dirstate.parents()
442 442 if key == nullid:
443 443 raise repo.RepoError(_("no revision checked out"))
444 444 if second != nullid:
445 445 self.ui.warn(_("warning: working directory has two parents, "
446 446 "tag '.' uses the first\n"))
447 447 elif key == 'null':
448 448 return nullid
449 449 n = self.changelog._match(key)
450 450 if n:
451 451 return n
452 452 if key in self.tags():
453 453 return self.tags()[key]
454 454 if key in self.branchtags():
455 455 return self.branchtags()[key]
456 456 n = self.changelog._partialmatch(key)
457 457 if n:
458 458 return n
459 459 try:
460 460 if len(key) == 20:
461 461 key = hex(key)
462 462 except:
463 463 pass
464 464 raise repo.RepoError(_("unknown revision '%s'") % key)
465 465
466 466 def local(self):
467 467 return True
468 468
469 469 def join(self, f):
470 470 return os.path.join(self.path, f)
471 471
472 472 def sjoin(self, f):
473 473 f = self.encodefn(f)
474 474 return os.path.join(self.spath, f)
475 475
476 476 def wjoin(self, f):
477 477 return os.path.join(self.root, f)
478 478
479 479 def file(self, f):
480 480 if f[0] == '/':
481 481 f = f[1:]
482 482 return filelog.filelog(self.sopener, f)
483 483
484 484 def changectx(self, changeid=None):
485 485 return context.changectx(self, changeid)
486 486
487 487 def workingctx(self):
488 488 return context.workingctx(self)
489 489
490 490 def parents(self, changeid=None):
491 491 '''
492 492 get list of changectxs for parents of changeid or working directory
493 493 '''
494 494 if changeid is None:
495 495 pl = self.dirstate.parents()
496 496 else:
497 497 n = self.changelog.lookup(changeid)
498 498 pl = self.changelog.parents(n)
499 499 if pl[1] == nullid:
500 500 return [self.changectx(pl[0])]
501 501 return [self.changectx(pl[0]), self.changectx(pl[1])]
502 502
503 503 def filectx(self, path, changeid=None, fileid=None):
504 504 """changeid can be a changeset revision, node, or tag.
505 505 fileid can be a file revision or node."""
506 506 return context.filectx(self, path, changeid, fileid)
507 507
508 508 def getcwd(self):
509 509 return self.dirstate.getcwd()
510 510
511 511 def pathto(self, f, cwd=None):
512 512 return self.dirstate.pathto(f, cwd)
513 513
514 514 def wfile(self, f, mode='r'):
515 515 return self.wopener(f, mode)
516 516
517 517 def _link(self, f):
518 518 return os.path.islink(self.wjoin(f))
519 519
520 520 def _filter(self, filter, filename, data):
521 521 if filter not in self.filterpats:
522 522 l = []
523 523 for pat, cmd in self.ui.configitems(filter):
524 524 mf = util.matcher(self.root, "", [pat], [], [])[1]
525 525 fn = None
526 526 params = cmd
527 527 for name, filterfn in self._datafilters.iteritems():
528 528 if cmd.startswith(name):
529 529 fn = filterfn
530 530 params = cmd[len(name):].lstrip()
531 531 break
532 532 if not fn:
533 533 fn = lambda s, c, **kwargs: util.filter(s, c)
534 534 # Wrap old filters not supporting keyword arguments
535 535 if not inspect.getargspec(fn)[2]:
536 536 oldfn = fn
537 537 fn = lambda s, c, **kwargs: oldfn(s, c)
538 538 l.append((mf, fn, params))
539 539 self.filterpats[filter] = l
540 540
541 541 for mf, fn, cmd in self.filterpats[filter]:
542 542 if mf(filename):
543 543 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
544 544 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
545 545 break
546 546
547 547 return data
548 548
549 549 def adddatafilter(self, name, filter):
550 550 self._datafilters[name] = filter
551 551
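
The _filter() machinery above is driven by [encode] and [decode] sections in hgrc: each entry maps a file pattern to either a registered data filter (matched by name prefix, see adddatafilter()) or, failing that, to a shell command the data is piped through via util.filter(). A hypothetical configuration, with the commands as examples only:

    [encode]
    # strip carriage returns when data enters the repository
    *.txt = tr -d '\r'

    [decode]
    # restore them when data is written to the working copy
    *.txt = sed -e 's/$/\r/'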
552 552 def wread(self, filename):
553 553 if self._link(filename):
554 554 data = os.readlink(self.wjoin(filename))
555 555 else:
556 556 data = self.wopener(filename, 'r').read()
557 557 return self._filter("encode", filename, data)
558 558
559 559 def wwrite(self, filename, data, flags):
560 560 data = self._filter("decode", filename, data)
561 561 try:
562 562 os.unlink(self.wjoin(filename))
563 563 except OSError:
564 564 pass
565 565 self.wopener(filename, 'w').write(data)
566 566 util.set_flags(self.wjoin(filename), flags)
567 567
568 568 def wwritedata(self, filename, data):
569 569 return self._filter("decode", filename, data)
570 570
571 571 def transaction(self):
572 572 if self._transref and self._transref():
573 573 return self._transref().nest()
574 574
575 575 # abort here if the journal already exists
576 576 if os.path.exists(self.sjoin("journal")):
577 577 raise repo.RepoError(_("journal already exists - run hg recover"))
578 578
579 579 # save dirstate for rollback
580 580 try:
581 581 ds = self.opener("dirstate").read()
582 582 except IOError:
583 583 ds = ""
584 584 self.opener("journal.dirstate", "w").write(ds)
585 585 self.opener("journal.branch", "w").write(self.dirstate.branch())
586 586
587 587 renames = [(self.sjoin("journal"), self.sjoin("undo")),
588 588 (self.join("journal.dirstate"), self.join("undo.dirstate")),
589 589 (self.join("journal.branch"), self.join("undo.branch"))]
590 590 tr = transaction.transaction(self.ui.warn, self.sopener,
591 591 self.sjoin("journal"),
592 592 aftertrans(renames),
593 593 self._createmode)
594 594 self._transref = weakref.ref(tr)
595 595 return tr
596 596
597 597 def recover(self):
598 598 l = self.lock()
599 599 try:
600 600 if os.path.exists(self.sjoin("journal")):
601 601 self.ui.status(_("rolling back interrupted transaction\n"))
602 602 transaction.rollback(self.sopener, self.sjoin("journal"))
603 603 self.invalidate()
604 604 return True
605 605 else:
606 606 self.ui.warn(_("no interrupted transaction available\n"))
607 607 return False
608 608 finally:
609 609 del l
610 610
611 611 def rollback(self):
612 612 wlock = lock = None
613 613 try:
614 614 wlock = self.wlock()
615 615 lock = self.lock()
616 616 if os.path.exists(self.sjoin("undo")):
617 617 self.ui.status(_("rolling back last transaction\n"))
618 618 transaction.rollback(self.sopener, self.sjoin("undo"))
619 619 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
620 620 try:
621 621 branch = self.opener("undo.branch").read()
622 622 self.dirstate.setbranch(branch)
623 623 except IOError:
624 624 self.ui.warn(_("Named branch could not be reset, "
625 625 "current branch still is: %s\n")
626 626 % util.tolocal(self.dirstate.branch()))
627 627 self.invalidate()
628 628 self.dirstate.invalidate()
629 629 else:
630 630 self.ui.warn(_("no rollback information available\n"))
631 631 finally:
632 632 del lock, wlock
633 633
634 634 def invalidate(self):
635 635 for a in "changelog manifest".split():
636 636 if hasattr(self, a):
637 637 self.__delattr__(a)
638 638 self.tagscache = None
639 639 self._tagstypecache = None
640 640 self.nodetagscache = None
641 641 self.branchcache = None
642 642 self._ubranchcache = None
643 643 self._branchcachetip = None
644 644
645 645 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
646 646 try:
647 647 l = lock.lock(lockname, 0, releasefn, desc=desc)
648 648 except lock.LockHeld, inst:
649 649 if not wait:
650 650 raise
651 651 self.ui.warn(_("waiting for lock on %s held by %r\n") %
652 652 (desc, inst.locker))
653 653 # default to 600 seconds timeout
654 654 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
655 655 releasefn, desc=desc)
656 656 if acquirefn:
657 657 acquirefn()
658 658 return l
659 659
660 660 def lock(self, wait=True):
661 661 if self._lockref and self._lockref():
662 662 return self._lockref()
663 663
664 664 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
665 665 _('repository %s') % self.origroot)
666 666 self._lockref = weakref.ref(l)
667 667 return l
668 668
669 669 def wlock(self, wait=True):
670 670 if self._wlockref and self._wlockref():
671 671 return self._wlockref()
672 672
673 673 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
674 674 self.dirstate.invalidate, _('working directory of %s') %
675 675 self.origroot)
676 676 self._wlockref = weakref.ref(l)
677 677 return l
678 678
679 679 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
680 680 """
681 681 commit an individual file as part of a larger transaction
682 682 """
683 683
684 684 t = self.wread(fn)
685 685 fl = self.file(fn)
686 686 fp1 = manifest1.get(fn, nullid)
687 687 fp2 = manifest2.get(fn, nullid)
688 688
689 689 meta = {}
690 690 cp = self.dirstate.copied(fn)
691 691 if cp:
692 692 # Mark the new revision of this file as a copy of another
693 693 # file. This copy data will effectively act as a parent
694 694 # of this new revision. If this is a merge, the first
695 695 # parent will be the nullid (meaning "look up the copy data")
696 696 # and the second one will be the other parent. For example:
697 697 #
698 698 # 0 --- 1 --- 3 rev1 changes file foo
699 699 # \ / rev2 renames foo to bar and changes it
700 700 # \- 2 -/ rev3 should have bar with all changes and
701 701 # should record that bar descends from
702 702 # bar in rev2 and foo in rev1
703 703 #
704 704 # this allows this merge to succeed:
705 705 #
706 706 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
707 707 # \ / merging rev3 and rev4 should use bar@rev2
708 708 # \- 2 --- 4 as the merge base
709 709 #
710 710 meta["copy"] = cp
711 711 if not manifest2: # not a branch merge
712 712 meta["copyrev"] = hex(manifest1.get(cp, nullid))
713 713 fp2 = nullid
714 714 elif fp2 != nullid: # copied on remote side
715 715 meta["copyrev"] = hex(manifest1.get(cp, nullid))
716 716 elif fp1 != nullid: # copied on local side, reversed
717 717 meta["copyrev"] = hex(manifest2.get(cp))
718 718 fp2 = fp1
719 719 elif cp in manifest2: # directory rename on local side
720 720 meta["copyrev"] = hex(manifest2[cp])
721 721 else: # directory rename on remote side
722 722 meta["copyrev"] = hex(manifest1.get(cp, nullid))
723 723 self.ui.debug(_(" %s: copy %s:%s\n") %
724 724 (fn, cp, meta["copyrev"]))
725 725 fp1 = nullid
726 726 elif fp2 != nullid:
727 727 # is one parent an ancestor of the other?
728 728 fpa = fl.ancestor(fp1, fp2)
729 729 if fpa == fp1:
730 730 fp1, fp2 = fp2, nullid
731 731 elif fpa == fp2:
732 732 fp2 = nullid
733 733
734 734 # is the file unmodified from the parent? report existing entry
735 735 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
736 736 return fp1
737 737
738 738 changelist.append(fn)
739 739 return fl.add(t, meta, tr, linkrev, fp1, fp2)
740 740
741 741 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
742 742 if p1 is None:
743 743 p1, p2 = self.dirstate.parents()
744 744 return self.commit(files=files, text=text, user=user, date=date,
745 745 p1=p1, p2=p2, extra=extra, empty_ok=True)
746 746
747 747 def commit(self, files=None, text="", user=None, date=None,
748 748 match=util.always, force=False, force_editor=False,
749 749 p1=None, p2=None, extra={}, empty_ok=False):
750 750 wlock = lock = tr = None
751 751 valid = 0 # don't save the dirstate if this isn't set
752 752 if files:
753 753 files = util.unique(files)
754 754 try:
755 755 commit = []
756 756 remove = []
757 757 changed = []
758 758 use_dirstate = (p1 is None) # not rawcommit
759 759 extra = extra.copy()
760 760
761 761 if use_dirstate:
762 762 if files:
763 763 for f in files:
764 764 s = self.dirstate[f]
765 765 if s in 'nma':
766 766 commit.append(f)
767 767 elif s == 'r':
768 768 remove.append(f)
769 769 else:
770 770 self.ui.warn(_("%s not tracked!\n") % f)
771 771 else:
772 772 changes = self.status(match=match)[:5]
773 773 modified, added, removed, deleted, unknown = changes
774 774 commit = modified + added
775 775 remove = removed
776 776 else:
777 777 commit = files
778 778
779 779 if use_dirstate:
780 780 p1, p2 = self.dirstate.parents()
781 781 update_dirstate = True
782 782 else:
783 783 p1, p2 = p1, p2 or nullid
784 784 update_dirstate = (self.dirstate.parents()[0] == p1)
785 785
786 786 c1 = self.changelog.read(p1)
787 787 c2 = self.changelog.read(p2)
788 788 m1 = self.manifest.read(c1[0]).copy()
789 789 m2 = self.manifest.read(c2[0])
790 790
791 791 if use_dirstate:
792 792 branchname = self.workingctx().branch()
793 793 try:
794 794 branchname = branchname.decode('UTF-8').encode('UTF-8')
795 795 except UnicodeDecodeError:
796 796 raise util.Abort(_('branch name not in UTF-8!'))
797 797 else:
798 798 branchname = ""
799 799
800 800 if use_dirstate:
801 801 oldname = c1[5].get("branch") # stored in UTF-8
802 802 if (not commit and not remove and not force and p2 == nullid
803 803 and branchname == oldname):
804 804 self.ui.status(_("nothing changed\n"))
805 805 return None
806 806
807 807 xp1 = hex(p1)
808 808 if p2 == nullid: xp2 = ''
809 809 else: xp2 = hex(p2)
810 810
811 811 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
812 812
813 813 wlock = self.wlock()
814 814 lock = self.lock()
815 815 tr = self.transaction()
816 816 trp = weakref.proxy(tr)
817 817
818 818 # check in files
819 819 new = {}
820 820 linkrev = self.changelog.count()
821 821 commit.sort()
822 822 is_exec = util.execfunc(self.root, m1.execf)
823 823 is_link = util.linkfunc(self.root, m1.linkf)
824 824 for f in commit:
825 825 self.ui.note(f + "\n")
826 826 try:
827 827 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
828 828 new_exec = is_exec(f)
829 829 new_link = is_link(f)
830 830 if ((not changed or changed[-1] != f) and
831 831 m2.get(f) != new[f]):
832 832 # mention the file in the changelog if some
833 833 # flag changed, even if there was no content
834 834 # change.
835 835 old_exec = m1.execf(f)
836 836 old_link = m1.linkf(f)
837 837 if old_exec != new_exec or old_link != new_link:
838 838 changed.append(f)
839 839 m1.set(f, new_exec, new_link)
840 840 if use_dirstate:
841 841 self.dirstate.normal(f)
842 842
843 843 except (OSError, IOError):
844 844 if use_dirstate:
845 845 self.ui.warn(_("trouble committing %s!\n") % f)
846 846 raise
847 847 else:
848 848 remove.append(f)
849 849
850 850 # update manifest
851 851 m1.update(new)
852 852 remove.sort()
853 853 removed = []
854 854
855 855 for f in remove:
856 856 if f in m1:
857 857 del m1[f]
858 858 removed.append(f)
859 859 elif f in m2:
860 860 removed.append(f)
861 861 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
862 862 (new, removed))
863 863
864 864 # add changeset
865 865 new = new.keys()
866 866 new.sort()
867 867
868 868 user = user or self.ui.username()
869 869 if (not empty_ok and not text) or force_editor:
870 870 edittext = []
871 871 if text:
872 872 edittext.append(text)
873 873 edittext.append("")
874 874 edittext.append(_("HG: Enter commit message."
875 875 " Lines beginning with 'HG:' are removed."))
876 876 edittext.append("HG: --")
877 877 edittext.append("HG: user: %s" % user)
878 878 if p2 != nullid:
879 879 edittext.append("HG: branch merge")
880 880 if branchname:
881 881 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
882 882 edittext.extend(["HG: changed %s" % f for f in changed])
883 883 edittext.extend(["HG: removed %s" % f for f in removed])
884 884 if not changed and not remove:
885 885 edittext.append("HG: no files changed")
886 886 edittext.append("")
887 887 # run editor in the repository root
888 888 olddir = os.getcwd()
889 889 os.chdir(self.root)
890 890 text = self.ui.edit("\n".join(edittext), user)
891 891 os.chdir(olddir)
892 892
893 893 if branchname:
894 894 extra["branch"] = branchname
895 895
896 896 lines = [line.rstrip() for line in text.rstrip().splitlines()]
897 897 while lines and not lines[0]:
898 898 del lines[0]
899 899 if not lines and use_dirstate:
900 900 raise util.Abort(_("empty commit message"))
901 901 text = '\n'.join(lines)
902 902
903 903 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
904 904 user, date, extra)
905 905 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
906 906 parent2=xp2)
907 907 tr.close()
908 908
909 909 if self.branchcache:
910 910 self.branchtags()
911 911
912 912 if use_dirstate or update_dirstate:
913 913 self.dirstate.setparents(n)
914 914 if use_dirstate:
915 915 for f in removed:
916 916 self.dirstate.forget(f)
917 917 valid = 1 # our dirstate updates are complete
918 918
919 919 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
920 920 return n
921 921 finally:
922 922 if not valid: # don't save our updated dirstate
923 923 self.dirstate.invalidate()
924 924 del tr, lock, wlock
925 925
926 926 def walk(self, node=None, files=[], match=util.always, badmatch=None):
927 927 '''
928 928 walk recursively through the directory tree or a given
929 929 changeset, finding all files matched by the match
930 930 function
931 931
932 932 results are yielded in a tuple (src, filename), where src
933 933 is one of:
934 934 'f' the file was found in the directory tree
935 935 'm' the file was only in the dirstate and not in the tree
936 936 'b' file was not found and matched badmatch
937 937 '''
938 938
939 939 if node:
940 940 fdict = dict.fromkeys(files)
941 941 # for dirstate.walk, files=['.'] means "walk the whole tree".
942 942 # follow that here, too
943 943 fdict.pop('.', None)
944 944 mdict = self.manifest.read(self.changelog.read(node)[0])
945 945 mfiles = mdict.keys()
946 946 mfiles.sort()
947 947 for fn in mfiles:
948 948 for ffn in fdict:
949 949 # match if the file is the exact name or a directory
950 950 if ffn == fn or fn.startswith("%s/" % ffn):
951 951 del fdict[ffn]
952 952 break
953 953 if match(fn):
954 954 yield 'm', fn
955 955 ffiles = fdict.keys()
956 956 ffiles.sort()
957 957 for fn in ffiles:
958 958 if badmatch and badmatch(fn):
959 959 if match(fn):
960 960 yield 'b', fn
961 961 else:
962 962 self.ui.warn(_('%s: No such file in rev %s\n')
963 963 % (self.pathto(fn), short(node)))
964 964 else:
965 965 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
966 966 yield src, fn
967 967
968 968 def status(self, node1=None, node2=None, files=[], match=util.always,
969 969 list_ignored=False, list_clean=False, list_unknown=True):
970 970 """return status of files between two nodes or node and working directory
971 971
972 972 If node1 is None, use the first dirstate parent instead.
973 973 If node2 is None, compare node1 with working directory.
974 974 """
975 975
976 976 def fcmp(fn, getnode):
977 977 t1 = self.wread(fn)
978 978 return self.file(fn).cmp(getnode(fn), t1)
979 979
980 980 def mfmatches(node):
981 981 change = self.changelog.read(node)
982 982 mf = self.manifest.read(change[0]).copy()
983 983 for fn in mf.keys():
984 984 if not match(fn):
985 985 del mf[fn]
986 986 return mf
987 987
988 988 modified, added, removed, deleted, unknown = [], [], [], [], []
989 989 ignored, clean = [], []
990 990
991 991 compareworking = False
992 992 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
993 993 compareworking = True
994 994
995 995 if not compareworking:
996 996 # read the manifest from node1 before the manifest from node2,
997 997 # so that we'll hit the manifest cache if we're going through
998 998 # all the revisions in parent->child order.
999 999 mf1 = mfmatches(node1)
1000 1000
1001 1001 # are we comparing the working directory?
1002 1002 if not node2:
1003 1003 (lookup, modified, added, removed, deleted, unknown,
1004 1004 ignored, clean) = self.dirstate.status(files, match,
1005 1005 list_ignored, list_clean,
1006 1006 list_unknown)
1007 1007
1008 1008 # are we comparing working dir against its parent?
1009 1009 if compareworking:
1010 1010 if lookup:
1011 1011 fixup = []
1012 1012 # do a full compare of any files that might have changed
1013 1013 ctx = self.changectx()
1014 1014 mexec = lambda f: 'x' in ctx.fileflags(f)
1015 1015 mlink = lambda f: 'l' in ctx.fileflags(f)
1016 1016 is_exec = util.execfunc(self.root, mexec)
1017 1017 is_link = util.linkfunc(self.root, mlink)
1018 1018 def flags(f):
1019 1019 return is_link(f) and 'l' or is_exec(f) and 'x' or ''
1020 1020 for f in lookup:
1021 1021 if (f not in ctx or flags(f) != ctx.fileflags(f)
1022 1022 or ctx[f].cmp(self.wread(f))):
1023 1023 modified.append(f)
1024 1024 else:
1025 1025 fixup.append(f)
1026 1026 if list_clean:
1027 1027 clean.append(f)
1028 1028
1029 1029 # update dirstate for files that are actually clean
1030 1030 if fixup:
1031 1031 wlock = None
1032 1032 try:
1033 1033 try:
1034 1034 wlock = self.wlock(False)
1035 1035 except lock.LockException:
1036 1036 pass
1037 1037 if wlock:
1038 1038 for f in fixup:
1039 1039 self.dirstate.normal(f)
1040 1040 finally:
1041 1041 del wlock
1042 1042 else:
1043 1043 # we are comparing working dir against non-parent
1044 1044 # generate a pseudo-manifest for the working dir
1045 1045 # XXX: create it in dirstate.py ?
1046 1046 mf2 = mfmatches(self.dirstate.parents()[0])
1047 1047 is_exec = util.execfunc(self.root, mf2.execf)
1048 1048 is_link = util.linkfunc(self.root, mf2.linkf)
1049 1049 for f in lookup + modified + added:
1050 1050 mf2[f] = ""
1051 1051 mf2.set(f, is_exec(f), is_link(f))
1052 1052 for f in removed:
1053 1053 if f in mf2:
1054 1054 del mf2[f]
1055 1055
1056 1056 else:
1057 1057 # we are comparing two revisions
1058 1058 mf2 = mfmatches(node2)
1059 1059
1060 1060 if not compareworking:
1061 1061 # flush lists from dirstate before comparing manifests
1062 1062 modified, added, clean = [], [], []
1063 1063
1064 1064 # make sure to sort the files so we talk to the disk in a
1065 1065 # reasonable order
1066 1066 mf2keys = mf2.keys()
1067 1067 mf2keys.sort()
1068 1068 getnode = lambda fn: mf1.get(fn, nullid)
1069 1069 for fn in mf2keys:
1070 1070 if fn in mf1:
1071 1071 if (mf1.flags(fn) != mf2.flags(fn) or
1072 1072 (mf1[fn] != mf2[fn] and
1073 1073 (mf2[fn] != "" or fcmp(fn, getnode)))):
1074 1074 modified.append(fn)
1075 1075 elif list_clean:
1076 1076 clean.append(fn)
1077 1077 del mf1[fn]
1078 1078 else:
1079 1079 added.append(fn)
1080 1080
1081 1081 removed = mf1.keys()
1082 1082
1083 1083 # sort and return results:
1084 1084 for l in modified, added, removed, deleted, unknown, ignored, clean:
1085 1085 l.sort()
1086 1086 return (modified, added, removed, deleted, unknown, ignored, clean)
1087 1087
1088 1088 def add(self, list):
1089 1089 wlock = self.wlock()
1090 1090 try:
1091 1091 rejected = []
1092 1092 for f in list:
1093 1093 p = self.wjoin(f)
1094 1094 try:
1095 1095 st = os.lstat(p)
1096 1096 except:
1097 1097 self.ui.warn(_("%s does not exist!\n") % f)
1098 1098 rejected.append(f)
1099 1099 continue
1100 1100 if st.st_size > 10000000:
1101 1101 self.ui.warn(_("%s: files over 10MB may cause memory and"
1102 1102 " performance problems\n"
1103 1103 "(use 'hg revert %s' to unadd the file)\n")
1104 1104 % (f, f))
1105 1105 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1106 1106 self.ui.warn(_("%s not added: only files and symlinks "
1107 1107 "supported currently\n") % f)
1108 1108 rejected.append(p)
1109 1109 elif self.dirstate[f] in 'amn':
1110 1110 self.ui.warn(_("%s already tracked!\n") % f)
1111 1111 elif self.dirstate[f] == 'r':
1112 1112 self.dirstate.normallookup(f)
1113 1113 else:
1114 1114 self.dirstate.add(f)
1115 1115 return rejected
1116 1116 finally:
1117 1117 del wlock
1118 1118
1119 1119 def forget(self, list):
1120 1120 wlock = self.wlock()
1121 1121 try:
1122 1122 for f in list:
1123 1123 if self.dirstate[f] != 'a':
1124 1124 self.ui.warn(_("%s not added!\n") % f)
1125 1125 else:
1126 1126 self.dirstate.forget(f)
1127 1127 finally:
1128 1128 del wlock
1129 1129
1130 1130 def remove(self, list, unlink=False):
1131 1131 wlock = None
1132 1132 try:
1133 1133 if unlink:
1134 1134 for f in list:
1135 1135 try:
1136 1136 util.unlink(self.wjoin(f))
1137 1137 except OSError, inst:
1138 1138 if inst.errno != errno.ENOENT:
1139 1139 raise
1140 1140 wlock = self.wlock()
1141 1141 for f in list:
1142 1142 if unlink and os.path.exists(self.wjoin(f)):
1143 1143 self.ui.warn(_("%s still exists!\n") % f)
1144 1144 elif self.dirstate[f] == 'a':
1145 1145 self.dirstate.forget(f)
1146 1146 elif f not in self.dirstate:
1147 1147 self.ui.warn(_("%s not tracked!\n") % f)
1148 1148 else:
1149 1149 self.dirstate.remove(f)
1150 1150 finally:
1151 1151 del wlock
1152 1152
1153 1153 def undelete(self, list):
1154 1154 wlock = None
1155 1155 try:
1156 1156 manifests = [self.manifest.read(self.changelog.read(p)[0])
1157 1157 for p in self.dirstate.parents() if p != nullid]
1158 1158 wlock = self.wlock()
1159 1159 for f in list:
1160 1160 if self.dirstate[f] != 'r':
1161 1161 self.ui.warn("%s not removed!\n" % f)
1162 1162 else:
1163 1163 m = f in manifests[0] and manifests[0] or manifests[1]
1164 1164 t = self.file(f).read(m[f])
1165 1165 self.wwrite(f, t, m.flags(f))
1166 1166 self.dirstate.normal(f)
1167 1167 finally:
1168 1168 del wlock
1169 1169
1170 1170 def copy(self, source, dest):
1171 1171 wlock = None
1172 1172 try:
1173 1173 p = self.wjoin(dest)
1174 1174 if not (os.path.exists(p) or os.path.islink(p)):
1175 1175 self.ui.warn(_("%s does not exist!\n") % dest)
1176 1176 elif not (os.path.isfile(p) or os.path.islink(p)):
1177 1177 self.ui.warn(_("copy failed: %s is not a file or a "
1178 1178 "symbolic link\n") % dest)
1179 1179 else:
1180 1180 wlock = self.wlock()
1181 1181 if dest not in self.dirstate:
1182 1182 self.dirstate.add(dest)
1183 1183 self.dirstate.copy(source, dest)
1184 1184 finally:
1185 1185 del wlock
1186 1186
1187 1187 def heads(self, start=None):
1188 1188 heads = self.changelog.heads(start)
1189 1189 # sort the output in rev descending order
1190 1190 heads = [(-self.changelog.rev(h), h) for h in heads]
1191 1191 heads.sort()
1192 1192 return [n for (r, n) in heads]
1193 1193
1194 1194 def branchheads(self, branch, start=None):
1195 1195 branches = self.branchtags()
1196 1196 if branch not in branches:
1197 1197 return []
1198 1198 # The basic algorithm is this:
1199 1199 #
1200 1200 # Start from the branch tip since there are no later revisions that can
1201 1201 # possibly be in this branch, and the tip is a guaranteed head.
1202 1202 #
1203 1203 # Remember the tip's parents as the first ancestors, since these by
1204 1204 # definition are not heads.
1205 1205 #
1206 1206         # Step backwards from the branch tip through all the revisions. We are
1207 1207 # guaranteed by the rules of Mercurial that we will now be visiting the
1208 1208 # nodes in reverse topological order (children before parents).
1209 1209 #
1210 1210 # If a revision is one of the ancestors of a head then we can toss it
1211 1211 # out of the ancestors set (we've already found it and won't be
1212 1212 # visiting it again) and put its parents in the ancestors set.
1213 1213 #
1214 1214 # Otherwise, if a revision is in the branch it's another head, since it
1215 1215 # wasn't in the ancestor list of an existing head. So add it to the
1216 1216 # head list, and add its parents to the ancestor list.
1217 1217 #
1218 1218 # If it is not in the branch ignore it.
1219 1219 #
1220 1220 # Once we have a list of heads, use nodesbetween to filter out all the
1221 1221 # heads that cannot be reached from start. There may be a more
1222 1222 # efficient way to do this as part of the previous algorithm.
1223 1223
1224 1224 set = util.set
1225 1225 heads = [self.changelog.rev(branches[branch])]
1226 1226 # Don't care if ancestors contains nullrev or not.
1227 1227 ancestors = set(self.changelog.parentrevs(heads[0]))
1228 1228 for rev in xrange(heads[0] - 1, nullrev, -1):
1229 1229 if rev in ancestors:
1230 1230 ancestors.update(self.changelog.parentrevs(rev))
1231 1231 ancestors.remove(rev)
1232 1232 elif self.changectx(rev).branch() == branch:
1233 1233 heads.append(rev)
1234 1234 ancestors.update(self.changelog.parentrevs(rev))
1235 1235 heads = [self.changelog.node(rev) for rev in heads]
1236 1236 if start is not None:
1237 1237 heads = self.changelog.nodesbetween([start], heads)[2]
1238 1238 return heads
1239 1239
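# Editor's sketch (not part of the original file): the reverse
# topological sweep used by branchheads() above, run over a toy history
# given as parent-rev pairs. All names here are hypothetical.
#
#   def toy_branchheads(parentrevs, branchof, tiprev, branch):
#       # parentrevs[rev] -> (p1, p2), with -1 standing in for nullrev
#       heads = [tiprev]
#       ancestors = set(parentrevs[tiprev])
#       for rev in xrange(tiprev - 1, -1, -1):
#           if rev in ancestors:
#               # a known non-head; its parents cannot be heads either
#               ancestors.update(parentrevs[rev])
#               ancestors.remove(rev)
#           elif branchof[rev] == branch:
#               # on the branch and not below any head found so far
#               heads.append(rev)
#               ancestors.update(parentrevs[rev])
#       return heads
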
1240 1240 def branches(self, nodes):
1241 1241 if not nodes:
1242 1242 nodes = [self.changelog.tip()]
1243 1243 b = []
1244 1244 for n in nodes:
1245 1245 t = n
1246 1246 while 1:
1247 1247 p = self.changelog.parents(n)
1248 1248 if p[1] != nullid or p[0] == nullid:
1249 1249 b.append((t, n, p[0], p[1]))
1250 1250 break
1251 1251 n = p[0]
1252 1252 return b
1253 1253
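# Note on branches() above: for each starting node t the loop follows
# first parents until it reaches a merge (p[1] != nullid) or a root
# (p[0] == nullid), and reports that linear segment as a four-tuple
# (topmost node, bottom node, first parent, second parent).
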
1254 1254 def between(self, pairs):
1255 1255 r = []
1256 1256
1257 1257 for top, bottom in pairs:
1258 1258 n, l, i = top, [], 0
1259 1259 f = 1
1260 1260
1261 1261 while n != bottom:
1262 1262 p = self.changelog.parents(n)[0]
1263 1263 if i == f:
1264 1264 l.append(n)
1265 1265 f = f * 2
1266 1266 n = p
1267 1267 i += 1
1268 1268
1269 1269 r.append(l)
1270 1270
1271 1271 return r
1272 1272
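# Worked example for between() above: on a linear chain of revs 10..0,
# the loop appends a node whenever i == f and then doubles f, so the
# returned samples sit 1, 2, 4 and 8 steps below the top:
#
#   between([(node(10), node(0))]) == [[node(9), node(8), node(6),
#                                       node(2)]]
#
# findincoming() below uses these exponentially spaced samples for its
# binary search over remote branches.
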
1273 1273 def findincoming(self, remote, base=None, heads=None, force=False):
1274 1274 """Return list of roots of the subsets of missing nodes from remote
1275 1275
1276 1276 If base dict is specified, assume that these nodes and their parents
1277 1277 exist on the remote side and that no child of a node of base exists
1278 1278 in both remote and self.
1279 1279 Furthermore, base will be updated to include the nodes that exist
1280 1280 in both self and remote but whose children do not exist in both.
1281 1281 If a list of heads is specified, return only nodes which are heads
1282 1282 or ancestors of these heads.
1283 1283
1284 1284 All the ancestors of base are in self and in remote.
1285 1285 All the descendants of the list returned are missing in self.
1286 1286 (and so we know that the rest of the nodes are missing in remote, see
1287 1287 outgoing)
1288 1288 """
1289 1289 m = self.changelog.nodemap
1290 1290 search = []
1291 1291 fetch = {}
1292 1292 seen = {}
1293 1293 seenbranch = {}
1294 1294 if base is None:
1295 1295 base = {}
1296 1296
1297 1297 if not heads:
1298 1298 heads = remote.heads()
1299 1299
1300 1300 if self.changelog.tip() == nullid:
1301 1301 base[nullid] = 1
1302 1302 if heads != [nullid]:
1303 1303 return [nullid]
1304 1304 return []
1305 1305
1306 1306 # assume we're closer to the tip than the root
1307 1307 # and start by examining the heads
1308 1308 self.ui.status(_("searching for changes\n"))
1309 1309
1310 1310 unknown = []
1311 1311 for h in heads:
1312 1312 if h not in m:
1313 1313 unknown.append(h)
1314 1314 else:
1315 1315 base[h] = 1
1316 1316
1317 1317 if not unknown:
1318 1318 return []
1319 1319
1320 1320 req = dict.fromkeys(unknown)
1321 1321 reqcnt = 0
1322 1322
1323 1323 # search through remote branches
1324 1324 # a 'branch' here is a linear segment of history, with four parts:
1325 1325 # head, root, first parent, second parent
1326 1326 # (a branch always has two parents (or none) by definition)
1327 1327 unknown = remote.branches(unknown)
1328 1328 while unknown:
1329 1329 r = []
1330 1330 while unknown:
1331 1331 n = unknown.pop(0)
1332 1332 if n[0] in seen:
1333 1333 continue
1334 1334
1335 1335 self.ui.debug(_("examining %s:%s\n")
1336 1336 % (short(n[0]), short(n[1])))
1337 1337 if n[0] == nullid: # found the end of the branch
1338 1338 pass
1339 1339 elif n in seenbranch:
1340 1340 self.ui.debug(_("branch already found\n"))
1341 1341 continue
1342 1342 elif n[1] and n[1] in m: # do we know the base?
1343 1343 self.ui.debug(_("found incomplete branch %s:%s\n")
1344 1344 % (short(n[0]), short(n[1])))
1345 1345 search.append(n) # schedule branch range for scanning
1346 1346 seenbranch[n] = 1
1347 1347 else:
1348 1348 if n[1] not in seen and n[1] not in fetch:
1349 1349 if n[2] in m and n[3] in m:
1350 1350 self.ui.debug(_("found new changeset %s\n") %
1351 1351 short(n[1]))
1352 1352 fetch[n[1]] = 1 # earliest unknown
1353 1353 for p in n[2:4]:
1354 1354 if p in m:
1355 1355 base[p] = 1 # latest known
1356 1356
1357 1357 for p in n[2:4]:
1358 1358 if p not in req and p not in m:
1359 1359 r.append(p)
1360 1360 req[p] = 1
1361 1361 seen[n[0]] = 1
1362 1362
1363 1363 if r:
1364 1364 reqcnt += 1
1365 1365 self.ui.debug(_("request %d: %s\n") %
1366 1366 (reqcnt, " ".join(map(short, r))))
1367 1367 for p in xrange(0, len(r), 10):
1368 1368 for b in remote.branches(r[p:p+10]):
1369 1369 self.ui.debug(_("received %s:%s\n") %
1370 1370 (short(b[0]), short(b[1])))
1371 1371 unknown.append(b)
1372 1372
1373 1373 # do binary search on the branches we found
1374 1374 while search:
1375 1375 n = search.pop(0)
1376 1376 reqcnt += 1
1377 1377 l = remote.between([(n[0], n[1])])[0]
1378 1378 l.append(n[1])
1379 1379 p = n[0]
1380 1380 f = 1
1381 1381 for i in l:
1382 1382 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1383 1383 if i in m:
1384 1384 if f <= 2:
1385 1385 self.ui.debug(_("found new branch changeset %s\n") %
1386 1386 short(p))
1387 1387 fetch[p] = 1
1388 1388 base[i] = 1
1389 1389 else:
1390 1390 self.ui.debug(_("narrowed branch search to %s:%s\n")
1391 1391 % (short(p), short(i)))
1392 1392 search.append((p, i))
1393 1393 break
1394 1394 p, f = i, f * 2
1395 1395
1396 1396 # sanity check our fetch list
1397 1397 for f in fetch.keys():
1398 1398 if f in m:
1399 1399 raise repo.RepoError(_("already have changeset ") + short(f))
1400 1400
1401 1401 if base.keys() == [nullid]:
1402 1402 if force:
1403 1403 self.ui.warn(_("warning: repository is unrelated\n"))
1404 1404 else:
1405 1405 raise util.Abort(_("repository is unrelated"))
1406 1406
1407 1407 self.ui.debug(_("found new changesets starting at ") +
1408 1408 " ".join([short(f) for f in fetch]) + "\n")
1409 1409
1410 1410 self.ui.debug(_("%d total queries\n") % reqcnt)
1411 1411
1412 1412 return fetch.keys()
1413 1413
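# Note on the binary search above: remote.between() returns samples at
# exponentially growing distances below a branch head. The first sample
# i that is known locally brackets the boundary between the unknown
# node p and the known node i; if f <= 2 the two samples are adjacent,
# so p itself is the earliest unknown changeset, otherwise the (p, i)
# range is pushed back onto search for another round of narrowing.
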
1414 1414 def findoutgoing(self, remote, base=None, heads=None, force=False):
1415 1415 """Return list of nodes that are roots of subsets not in remote
1416 1416
1417 1417 If base dict is specified, assume that these nodes and their parents
1418 1418 exist on the remote side.
1419 1419 If a list of heads is specified, return only nodes which are heads
1420 1420 or ancestors of these heads, and return a second element which
1421 1421 contains all remote heads which get new children.
1422 1422 """
1423 1423 if base is None:
1424 1424 base = {}
1425 1425 self.findincoming(remote, base, heads, force=force)
1426 1426
1427 1427 self.ui.debug(_("common changesets up to ")
1428 1428 + " ".join(map(short, base.keys())) + "\n")
1429 1429
1430 1430 remain = dict.fromkeys(self.changelog.nodemap)
1431 1431
1432 1432 # prune everything remote has from the tree
1433 1433 del remain[nullid]
1434 1434 remove = base.keys()
1435 1435 while remove:
1436 1436 n = remove.pop(0)
1437 1437 if n in remain:
1438 1438 del remain[n]
1439 1439 for p in self.changelog.parents(n):
1440 1440 remove.append(p)
1441 1441
1442 1442 # find every node whose parents have been pruned
1443 1443 subset = []
1444 1444 # find every remote head that will get new children
1445 1445 updated_heads = {}
1446 1446 for n in remain:
1447 1447 p1, p2 = self.changelog.parents(n)
1448 1448 if p1 not in remain and p2 not in remain:
1449 1449 subset.append(n)
1450 1450 if heads:
1451 1451 if p1 in heads:
1452 1452 updated_heads[p1] = True
1453 1453 if p2 in heads:
1454 1454 updated_heads[p2] = True
1455 1455
1456 1456 # this is the set of all roots we have to push
1457 1457 if heads:
1458 1458 return subset, updated_heads.keys()
1459 1459 else:
1460 1460 return subset
1461 1461
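# Note on the pruning above: the common nodes in base and all of their
# ancestors are deleted from remain, so remain ends up holding exactly
# the local nodes the remote is missing; subset then collects their
# roots, the nodes both of whose parents were pruned away.
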
1462 1462 def pull(self, remote, heads=None, force=False):
1463 1463 lock = self.lock()
1464 1464 try:
1465 1465 fetch = self.findincoming(remote, heads=heads, force=force)
1466 1466 if fetch == [nullid]:
1467 1467 self.ui.status(_("requesting all changes\n"))
1468 1468
1469 1469 if not fetch:
1470 1470 self.ui.status(_("no changes found\n"))
1471 1471 return 0
1472 1472
1473 1473 if heads is None:
1474 1474 cg = remote.changegroup(fetch, 'pull')
1475 1475 else:
1476 1476 if 'changegroupsubset' not in remote.capabilities:
1477 1477 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1478 1478 cg = remote.changegroupsubset(fetch, heads, 'pull')
1479 1479 return self.addchangegroup(cg, 'pull', remote.url())
1480 1480 finally:
1481 1481 del lock
1482 1482
1483 1483 def push(self, remote, force=False, revs=None):
1484 1484 # there are two ways to push to remote repo:
1485 1485 #
1486 1486 # addchangegroup assumes local user can lock remote
1487 1487 # repo (local filesystem, old ssh servers).
1488 1488 #
1489 1489 # unbundle assumes local user cannot lock remote repo (new ssh
1490 1490 # servers, http servers).
1491 1491
1492 1492 if remote.capable('unbundle'):
1493 1493 return self.push_unbundle(remote, force, revs)
1494 1494 return self.push_addchangegroup(remote, force, revs)
1495 1495
1496 1496 def prepush(self, remote, force, revs):
1497 1497 base = {}
1498 1498 remote_heads = remote.heads()
1499 1499 inc = self.findincoming(remote, base, remote_heads, force=force)
1500 1500
1501 1501 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1502 1502 if revs is not None:
1503 1503 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1504 1504 else:
1505 1505 bases, heads = update, self.changelog.heads()
1506 1506
1507 1507 if not bases:
1508 1508 self.ui.status(_("no changes found\n"))
1509 1509 return None, 1
1510 1510 elif not force:
1511 1511 # check if we're creating new remote heads
1512 1512 # to be a remote head after push, node must be either
1513 1513 # - unknown locally
1514 1514 # - a local outgoing head descended from update
1515 1515 # - a remote head that's known locally and not
1516 1516 # ancestral to an outgoing head
1517 1517
1518 1518 warn = 0
1519 1519
1520 1520 if remote_heads == [nullid]:
1521 1521 warn = 0
1522 1522 elif not revs and len(heads) > len(remote_heads):
1523 1523 warn = 1
1524 1524 else:
1525 1525 newheads = list(heads)
1526 1526 for r in remote_heads:
1527 1527 if r in self.changelog.nodemap:
1528 1528 desc = self.changelog.heads(r, heads)
1529 1529 l = [h for h in heads if h in desc]
1530 1530 if not l:
1531 1531 newheads.append(r)
1532 1532 else:
1533 1533 newheads.append(r)
1534 1534 if len(newheads) > len(remote_heads):
1535 1535 warn = 1
1536 1536
1537 1537 if warn:
1538 1538 self.ui.warn(_("abort: push creates new remote heads!\n"))
1539 1539 self.ui.status(_("(did you forget to merge?"
1540 1540 " use push -f to force)\n"))
1541 1541 return None, 0
1542 1542 elif inc:
1543 1543 self.ui.warn(_("note: unsynced remote changes!\n"))
1544 1544
1545 1545
1546 1546 if revs is None:
1547 1547 cg = self.changegroup(update, 'push')
1548 1548 else:
1549 1549 cg = self.changegroupsubset(update, revs, 'push')
1550 1550 return cg, remote_heads
1551 1551
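# Illustrative example of the new-head check above: if the remote has
# heads {A} and we push an outgoing head B descended from A, then A
# drops out of newheads (an outgoing head descends from it) and B takes
# its place, so len(newheads) == len(remote_heads) and no warning is
# issued; pushing an unrelated head C instead leaves A in place and
# grows newheads, which triggers the "creates new remote heads" abort.
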
1552 1552 def push_addchangegroup(self, remote, force, revs):
1553 1553 lock = remote.lock()
1554 1554 try:
1555 1555 ret = self.prepush(remote, force, revs)
1556 1556 if ret[0] is not None:
1557 1557 cg, remote_heads = ret
1558 1558 return remote.addchangegroup(cg, 'push', self.url())
1559 1559 return ret[1]
1560 1560 finally:
1561 1561 del lock
1562 1562
1563 1563 def push_unbundle(self, remote, force, revs):
1564 1564 # local repo finds heads on server, finds out what revs it
1565 1565 # must push. once revs transferred, if server finds it has
1566 1566 # different heads (someone else won commit/push race), server
1567 1567 # aborts.
1568 1568
1569 1569 ret = self.prepush(remote, force, revs)
1570 1570 if ret[0] is not None:
1571 1571 cg, remote_heads = ret
1572 1572 if force: remote_heads = ['force']
1573 1573 return remote.unbundle(cg, remote_heads, 'push')
1574 1574 return ret[1]
1575 1575
1576 1576 def changegroupinfo(self, nodes, source):
1577 1577 if self.ui.verbose or source == 'bundle':
1578 1578 self.ui.status(_("%d changesets found\n") % len(nodes))
1579 1579 if self.ui.debugflag:
1580 1580 self.ui.debug(_("List of changesets:\n"))
1581 1581 for node in nodes:
1582 1582 self.ui.debug("%s\n" % hex(node))
1583 1583
1584 1584 def changegroupsubset(self, bases, heads, source, extranodes=None):
1585 1585 """This function generates a changegroup consisting of all the nodes
1586 1586 that are descendants of any of the bases, and ancestors of any of
1587 1587 the heads.
1588 1588
1589 1589 It is fairly complex as determining which filenodes and which
1590 1590 manifest nodes need to be included for the changeset to be complete
1591 1591 is non-trivial.
1592 1592
1593 1593 Another wrinkle is doing the reverse, figuring out which changeset in
1594 1594 the changegroup a particular filenode or manifestnode belongs to.
1595 1595
1596 1596 The caller can specify some nodes that must be included in the
1597 1597 changegroup using the extranodes argument. It should be a dict
1598 1598 where the keys are the filenames (or 1 for the manifest), and the
1599 1599 values are lists of (node, linknode) tuples, where node is a wanted
1600 1600 node and linknode is the changelog node that should be transmitted as
1601 1601 the linkrev.
1602 1602 """
1603 1603
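# Illustrative shape of the extranodes argument described in the
# docstring above (all names hypothetical):
#
#   extranodes = {
#       1: [(manifestnode, linknode)],      # extra manifest nodes
#       'foo.txt': [(filenode, linknode)],  # extra filenodes for a file
#   }
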
1604 1604 self.hook('preoutgoing', throw=True, source=source)
1605 1605
1606 1606 # Set up some initial variables
1607 1607 # Make it easy to refer to self.changelog
1608 1608 cl = self.changelog
1609 1609 # msng is short for missing - compute the list of changesets in this
1610 1610 # changegroup.
1611 1611 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1612 1612 self.changegroupinfo(msng_cl_lst, source)
1613 1613 # Some bases may turn out to be superfluous, and some heads may be
1614 1614 # too. nodesbetween will return the minimal set of bases and heads
1615 1615 # necessary to re-create the changegroup.
1616 1616
1617 1617 # Known heads are the list of heads that it is assumed the recipient
1618 1618 # of this changegroup will know about.
1619 1619 knownheads = {}
1620 1620 # We assume that all parents of bases are known heads.
1621 1621 for n in bases:
1622 1622 for p in cl.parents(n):
1623 1623 if p != nullid:
1624 1624 knownheads[p] = 1
1625 1625 knownheads = knownheads.keys()
1626 1626 if knownheads:
1627 1627 # Now that we know what heads are known, we can compute which
1628 1628 # changesets are known. The recipient must know about all
1629 1629 # changesets required to reach the known heads from the null
1630 1630 # changeset.
1631 1631 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1632 1632 junk = None
1633 1633 # Transform the list into an ersatz set.
1634 1634 has_cl_set = dict.fromkeys(has_cl_set)
1635 1635 else:
1636 1636 # If there were no known heads, the recipient cannot be assumed to
1637 1637 # know about any changesets.
1638 1638 has_cl_set = {}
1639 1639
1640 1640 # Make it easy to refer to self.manifest
1641 1641 mnfst = self.manifest
1642 1642 # We don't know which manifests are missing yet
1643 1643 msng_mnfst_set = {}
1644 1644 # Nor do we know which filenodes are missing.
1645 1645 msng_filenode_set = {}
1646 1646
1647 1647 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1648 1648 junk = None
1649 1649
1650 1650 # A changeset always belongs to itself, so the changenode lookup
1651 1651 # function for a changenode is identity.
1652 1652 def identity(x):
1653 1653 return x
1654 1654
1655 1655 # A function generating function. Sets up an environment for the
1656 1656 # inner function.
1657 1657 def cmp_by_rev_func(revlog):
1658 1658 # Compare two nodes by their revision number in the environment's
1659 1659 # revision history. Since the revision number both represents the
1660 1660 # most efficient order to read the nodes in, and represents a
1661 1661 # topological sorting of the nodes, this function is often useful.
1662 1662 def cmp_by_rev(a, b):
1663 1663 return cmp(revlog.rev(a), revlog.rev(b))
1664 1664 return cmp_by_rev
1665 1665
1666 1666 # If we determine that a particular file or manifest node must be a
1667 1667 # node that the recipient of the changegroup will already have, we can
1668 1668 # also assume the recipient will have all the parents. This function
1669 1669 # prunes them from the set of missing nodes.
1670 1670 def prune_parents(revlog, hasset, msngset):
1671 1671 haslst = hasset.keys()
1672 1672 haslst.sort(cmp_by_rev_func(revlog))
1673 1673 for node in haslst:
1674 1674 parentlst = [p for p in revlog.parents(node) if p != nullid]
1675 1675 while parentlst:
1676 1676 n = parentlst.pop()
1677 1677 if n not in hasset:
1678 1678 hasset[n] = 1
1679 1679 p = [p for p in revlog.parents(n) if p != nullid]
1680 1680 parentlst.extend(p)
1681 1681 for n in hasset:
1682 1682 msngset.pop(n, None)
1683 1683
1684 1684 # This is a function generating function used to set up an environment
1685 1685 # for the inner function to execute in.
1686 1686 def manifest_and_file_collector(changedfileset):
1687 1687 # This is an information gathering function that gathers
1688 1688 # information from each changeset node that goes out as part of
1689 1689 # the changegroup. The information gathered is a list of which
1690 1690 # manifest nodes are potentially required (the recipient may
1691 1691 # already have them) and total list of all files which were
1692 1692 # changed in any changeset in the changegroup.
1693 1693 #
1694 1694 # We also remember the first changenode we saw any manifest
1695 1695 # referenced by so we can later determine which changenode 'owns'
1696 1696 # the manifest.
1697 1697 def collect_manifests_and_files(clnode):
1698 1698 c = cl.read(clnode)
1699 1699 for f in c[3]:
1700 1700 # This is to make sure we only have one instance of each
1701 1701 # filename string for each filename.
1702 1702 changedfileset.setdefault(f, f)
1703 1703 msng_mnfst_set.setdefault(c[0], clnode)
1704 1704 return collect_manifests_and_files
1705 1705
1706 1706 # Figure out which manifest nodes (of the ones we think might be part
1707 1707 # of the changegroup) the recipient must know about and remove them
1708 1708 # from the changegroup.
1709 1709 def prune_manifests():
1710 1710 has_mnfst_set = {}
1711 1711 for n in msng_mnfst_set:
1712 1712 # If a 'missing' manifest thinks it belongs to a changenode
1713 1713 # the recipient is assumed to have, obviously the recipient
1714 1714 # must have that manifest.
1715 1715 linknode = cl.node(mnfst.linkrev(n))
1716 1716 if linknode in has_cl_set:
1717 1717 has_mnfst_set[n] = 1
1718 1718 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1719 1719
1720 1720 # Use the information collected in collect_manifests_and_files to say
1721 1721 # which changenode any manifestnode belongs to.
1722 1722 def lookup_manifest_link(mnfstnode):
1723 1723 return msng_mnfst_set[mnfstnode]
1724 1724
1725 1725 # A function generating function that sets up the initial environment
1726 1726 # the inner function.
1727 1727 def filenode_collector(changedfiles):
1728 1728 next_rev = [0]
1729 1729 # This gathers information from each manifestnode included in the
1730 1730 # changegroup about which filenodes the manifest node references
1731 1731 # so we can include those in the changegroup too.
1732 1732 #
1733 1733 # It also remembers which changenode each filenode belongs to. It
1734 1734 # does this by assuming that a filenode belongs to the same changenode
1735 1735 # as the first manifest that references it.
1736 1736 def collect_msng_filenodes(mnfstnode):
1737 1737 r = mnfst.rev(mnfstnode)
1738 1738 if r == next_rev[0]:
1739 1739 # If this rev immediately follows the last one we looked at,
1740 1740 # a delta against it is all we need.
1741 1741 deltamf = mnfst.readdelta(mnfstnode)
1742 1742 # For each line in the delta
1743 1743 for f, fnode in deltamf.items():
1744 1744 f = changedfiles.get(f, None)
1745 1745 # And if the file is in the list of files we care
1746 1746 # about.
1747 1747 if f is not None:
1748 1748 # Get the changenode this manifest belongs to
1749 1749 clnode = msng_mnfst_set[mnfstnode]
1750 1750 # Create the set of filenodes for the file if
1751 1751 # there isn't one already.
1752 1752 ndset = msng_filenode_set.setdefault(f, {})
1753 1753 # And set the filenode's changelog node to the
1754 1754 # manifest's if it hasn't been set already.
1755 1755 ndset.setdefault(fnode, clnode)
1756 1756 else:
1757 1757 # Otherwise we need a full manifest.
1758 1758 m = mnfst.read(mnfstnode)
1759 1759 # For every file we care about.
1760 1760 for f in changedfiles:
1761 1761 fnode = m.get(f, None)
1762 1762 # If it's in the manifest
1763 1763 if fnode is not None:
1764 1764 # See comments above.
1765 1765 clnode = msng_mnfst_set[mnfstnode]
1766 1766 ndset = msng_filenode_set.setdefault(f, {})
1767 1767 ndset.setdefault(fnode, clnode)
1768 1768 # Remember the revision we hope to see next.
1769 1769 next_rev[0] = r + 1
1770 1770 return collect_msng_filenodes
1771 1771
1772 1772 # We have a list of filenodes we think we need for a file; let's remove
1773 1773 # all those we know the recipient must have.
1774 1774 def prune_filenodes(f, filerevlog):
1775 1775 msngset = msng_filenode_set[f]
1776 1776 hasset = {}
1777 1777 # If a 'missing' filenode thinks it belongs to a changenode we
1778 1778 # assume the recipient must have, then the recipient must have
1779 1779 # that filenode.
1780 1780 for n in msngset:
1781 1781 clnode = cl.node(filerevlog.linkrev(n))
1782 1782 if clnode in has_cl_set:
1783 1783 hasset[n] = 1
1784 1784 prune_parents(filerevlog, hasset, msngset)
1785 1785
1786 1786 # A function-generating function that sets up a context for the
1787 1787 # inner function.
1788 1788 def lookup_filenode_link_func(fname):
1789 1789 msngset = msng_filenode_set[fname]
1790 1790 # Lookup the changenode the filenode belongs to.
1791 1791 def lookup_filenode_link(fnode):
1792 1792 return msngset[fnode]
1793 1793 return lookup_filenode_link
1794 1794
1795 1795 # Add the nodes that were explicitly requested.
1796 1796 def add_extra_nodes(name, nodes):
1797 1797 if not extranodes or name not in extranodes:
1798 1798 return
1799 1799
1800 1800 for node, linknode in extranodes[name]:
1801 1801 if node not in nodes:
1802 1802 nodes[node] = linknode
1803 1803
1804 1804 # Now that we have all these utility functions to help out and
1805 1805 # logically divide up the task, generate the group.
1806 1806 def gengroup():
1807 1807 # The set of changed files starts empty.
1808 1808 changedfiles = {}
1809 1809 # Create a changenode group generator that will call our functions
1810 1810 # back to lookup the owning changenode and collect information.
1811 1811 group = cl.group(msng_cl_lst, identity,
1812 1812 manifest_and_file_collector(changedfiles))
1813 1813 for chnk in group:
1814 1814 yield chnk
1815 1815
1816 1816 # The list of manifests has been collected by the generator
1817 1817 # calling our functions back.
1818 1818 prune_manifests()
1819 1819 add_extra_nodes(1, msng_mnfst_set)
1820 1820 msng_mnfst_lst = msng_mnfst_set.keys()
1821 1821 # Sort the manifestnodes by revision number.
1822 1822 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1823 1823 # Create a generator for the manifestnodes that calls our lookup
1824 1824 # and data collection functions back.
1825 1825 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1826 1826 filenode_collector(changedfiles))
1827 1827 for chnk in group:
1828 1828 yield chnk
1829 1829
1830 1830 # These are no longer needed, dereference and toss the memory for
1831 1831 # them.
1832 1832 msng_mnfst_lst = None
1833 1833 msng_mnfst_set.clear()
1834 1834
1835 1835 if extranodes:
1836 1836 for fname in extranodes:
1837 1837 if isinstance(fname, int):
1838 1838 continue
1839 1839 add_extra_nodes(fname,
1840 1840 msng_filenode_set.setdefault(fname, {}))
1841 1841 changedfiles[fname] = 1
1842 1842 changedfiles = changedfiles.keys()
1843 1843 changedfiles.sort()
1844 1844 # Go through all our files in order sorted by name.
1845 1845 for fname in changedfiles:
1846 1846 filerevlog = self.file(fname)
1847 1847 if filerevlog.count() == 0:
1848 1848 raise util.Abort(_("empty or missing revlog for %s") % fname)
1849 1849 # Toss out the filenodes that the recipient isn't really
1850 1850 # missing.
1851 1851 if fname in msng_filenode_set:
1852 1852 prune_filenodes(fname, filerevlog)
1853 1853 msng_filenode_lst = msng_filenode_set[fname].keys()
1854 1854 else:
1855 1855 msng_filenode_lst = []
1856 1856 # If any filenodes are left, generate the group for them,
1857 1857 # otherwise don't bother.
1858 1858 if msng_filenode_lst:
1859 1859 yield changegroup.chunkheader(len(fname))
1860 1860 yield fname
1861 1861 # Sort the filenodes by their revision number.
1862 1862 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1863 1863 # Create a group generator and only pass in a changenode
1864 1864 # lookup function as we need to collect no information
1865 1865 # from filenodes.
1866 1866 group = filerevlog.group(msng_filenode_lst,
1867 1867 lookup_filenode_link_func(fname))
1868 1868 for chnk in group:
1869 1869 yield chnk
1870 1870 if fname in msng_filenode_set:
1871 1871 # Don't need this anymore, toss it to free memory.
1872 1872 del msng_filenode_set[fname]
1873 1873 # Signal that no more groups are left.
1874 1874 yield changegroup.closechunk()
1875 1875
1876 1876 if msng_cl_lst:
1877 1877 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1878 1878
1879 1879 return util.chunkbuffer(gengroup())
1880 1880
1881 1881 def changegroup(self, basenodes, source):
1882 1882 """Generate a changegroup of all nodes that we have that a recipient
1883 1883 doesn't.
1884 1884
1885 1885 This is much easier than the previous function as we can assume that
1886 1886 the recipient has any changenode we aren't sending them."""
1887 1887
1888 1888 self.hook('preoutgoing', throw=True, source=source)
1889 1889
1890 1890 cl = self.changelog
1891 1891 nodes = cl.nodesbetween(basenodes, None)[0]
1892 1892 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1893 1893 self.changegroupinfo(nodes, source)
1894 1894
1895 1895 def identity(x):
1896 1896 return x
1897 1897
1898 1898 def gennodelst(revlog):
1899 1899 for r in xrange(0, revlog.count()):
1900 1900 n = revlog.node(r)
1901 1901 if revlog.linkrev(n) in revset:
1902 1902 yield n
1903 1903
1904 1904 def changed_file_collector(changedfileset):
1905 1905 def collect_changed_files(clnode):
1906 1906 c = cl.read(clnode)
1907 1907 for fname in c[3]:
1908 1908 changedfileset[fname] = 1
1909 1909 return collect_changed_files
1910 1910
1911 1911 def lookuprevlink_func(revlog):
1912 1912 def lookuprevlink(n):
1913 1913 return cl.node(revlog.linkrev(n))
1914 1914 return lookuprevlink
1915 1915
1916 1916 def gengroup():
1917 1917 # construct a list of all changed files
1918 1918 changedfiles = {}
1919 1919
1920 1920 for chnk in cl.group(nodes, identity,
1921 1921 changed_file_collector(changedfiles)):
1922 1922 yield chnk
1923 1923 changedfiles = changedfiles.keys()
1924 1924 changedfiles.sort()
1925 1925
1926 1926 mnfst = self.manifest
1927 1927 nodeiter = gennodelst(mnfst)
1928 1928 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1929 1929 yield chnk
1930 1930
1931 1931 for fname in changedfiles:
1932 1932 filerevlog = self.file(fname)
1933 1933 if filerevlog.count() == 0:
1934 1934 raise util.Abort(_("empty or missing revlog for %s") % fname)
1935 1935 nodeiter = gennodelst(filerevlog)
1936 1936 nodeiter = list(nodeiter)
1937 1937 if nodeiter:
1938 1938 yield changegroup.chunkheader(len(fname))
1939 1939 yield fname
1940 1940 lookup = lookuprevlink_func(filerevlog)
1941 1941 for chnk in filerevlog.group(nodeiter, lookup):
1942 1942 yield chnk
1943 1943
1944 1944 yield changegroup.closechunk()
1945 1945
1946 1946 if nodes:
1947 1947 self.hook('outgoing', node=hex(nodes[0]), source=source)
1948 1948
1949 1949 return util.chunkbuffer(gengroup())
1950 1950
1951 1951 def addchangegroup(self, source, srctype, url, emptyok=False):
1952 1952 """add changegroup to repo.
1953 1953
1954 1954 return values:
1955 1955 - nothing changed or no source: 0
1956 1956 - more heads than before: 1+added heads (2..n)
1957 1957 - fewer heads than before: -1-removed heads (-2..-n)
1958 1958 - number of heads stays the same: 1
1959 1959 """
1960 1960 def csmap(x):
1961 1961 self.ui.debug(_("add changeset %s\n") % short(x))
1962 1962 return cl.count()
1963 1963
1964 1964 def revmap(x):
1965 1965 return cl.rev(x)
1966 1966
1967 1967 if not source:
1968 1968 return 0
1969 1969
1970 1970 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1971 1971
1972 1972 changesets = files = revisions = 0
1973 1973
1974 1974 # write changelog data to temp files so concurrent readers will not see
1975 1975 # inconsistent view
1976 1976 cl = self.changelog
1977 1977 cl.delayupdate()
1978 1978 oldheads = len(cl.heads())
1979 1979
1980 1980 tr = self.transaction()
1981 1981 try:
1982 1982 trp = weakref.proxy(tr)
1983 1983 # pull off the changeset group
1984 1984 self.ui.status(_("adding changesets\n"))
1985 1985 cor = cl.count() - 1
1986 1986 chunkiter = changegroup.chunkiter(source)
1987 1987 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1988 1988 raise util.Abort(_("received changelog group is empty"))
1989 1989 cnr = cl.count() - 1
1990 1990 changesets = cnr - cor
1991 1991
1992 1992 # pull off the manifest group
1993 1993 self.ui.status(_("adding manifests\n"))
1994 1994 chunkiter = changegroup.chunkiter(source)
1995 1995 # no need to check for empty manifest group here:
1996 1996 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1997 1997 # no new manifest will be created and the manifest group will
1998 1998 # be empty during the pull
1999 1999 self.manifest.addgroup(chunkiter, revmap, trp)
2000 2000
2001 2001 # process the files
2002 2002 self.ui.status(_("adding file changes\n"))
2003 2003 while 1:
2004 2004 f = changegroup.getchunk(source)
2005 2005 if not f:
2006 2006 break
2007 2007 self.ui.debug(_("adding %s revisions\n") % f)
2008 2008 fl = self.file(f)
2009 2009 o = fl.count()
2010 2010 chunkiter = changegroup.chunkiter(source)
2011 2011 if fl.addgroup(chunkiter, revmap, trp) is None:
2012 2012 raise util.Abort(_("received file revlog group is empty"))
2013 2013 revisions += fl.count() - o
2014 2014 files += 1
2015 2015
2016 2016 # make changelog see real files again
2017 2017 cl.finalize(trp)
2018 2018
2019 2019 newheads = len(self.changelog.heads())
2020 2020 heads = ""
2021 2021 if oldheads and newheads != oldheads:
2022 2022 heads = _(" (%+d heads)") % (newheads - oldheads)
2023 2023
2024 2024 self.ui.status(_("added %d changesets"
2025 2025 " with %d changes to %d files%s\n")
2026 2026 % (changesets, revisions, files, heads))
2027 2027
2028 2028 if changesets > 0:
2029 2029 self.hook('pretxnchangegroup', throw=True,
2030 2030 node=hex(self.changelog.node(cor+1)), source=srctype,
2031 2031 url=url)
2032 2032
2033 2033 tr.close()
2034 2034 finally:
2035 2035 del tr
2036 2036
2037 2037 if changesets > 0:
2038 2038 # forcefully update the on-disk branch cache
2039 2039 self.ui.debug(_("updating the branch cache\n"))
2040 2040 self.branchtags()
2041 2041 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2042 2042 source=srctype, url=url)
2043 2043
2044 2044 for i in xrange(cor + 1, cnr + 1):
2045 2045 self.hook("incoming", node=hex(self.changelog.node(i)),
2046 2046 source=srctype, url=url)
2047 2047
2048 2048 # never return 0 here:
2049 2049 if newheads < oldheads:
2050 2050 return newheads - oldheads - 1
2051 2051 else:
2052 2052 return newheads - oldheads + 1
2053 2053
2054 2054
2055 2055 def stream_in(self, remote):
2056 2056 fp = remote.stream_out()
2057 2057 l = fp.readline()
2058 2058 try:
2059 2059 resp = int(l)
2060 2060 except ValueError:
2061 2061 raise util.UnexpectedOutput(
2062 2062 _('Unexpected response from remote server:'), l)
2063 2063 if resp == 1:
2064 2064 raise util.Abort(_('operation forbidden by server'))
2065 2065 elif resp == 2:
2066 2066 raise util.Abort(_('locking the remote repository failed'))
2067 2067 elif resp != 0:
2068 2068 raise util.Abort(_('the server sent an unknown error code'))
2069 2069 self.ui.status(_('streaming all changes\n'))
2070 2070 l = fp.readline()
2071 2071 try:
2072 2072 total_files, total_bytes = map(int, l.split(' ', 1))
2073 2073 except (ValueError, TypeError):
2074 2074 raise util.UnexpectedOutput(
2075 2075 _('Unexpected response from remote server:'), l)
2076 2076 self.ui.status(_('%d files to transfer, %s of data\n') %
2077 2077 (total_files, util.bytecount(total_bytes)))
2078 2078 start = time.time()
2079 2079 for i in xrange(total_files):
2080 2080 # XXX doesn't support '\n' or '\r' in filenames
2081 2081 l = fp.readline()
2082 2082 try:
2083 2083 name, size = l.split('\0', 1)
2084 2084 size = int(size)
2085 2085 except (ValueError, TypeError):
2086 2086 raise util.UnexpectedOutput(
2087 2087 _('Unexpected response from remote server:'), l)
2088 2088 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2089 2089 ofp = self.sopener(name, 'w')
2090 2090 for chunk in util.filechunkiter(fp, limit=size):
2091 2091 ofp.write(chunk)
2092 2092 ofp.close()
2093 2093 elapsed = time.time() - start
2094 2094 if elapsed <= 0:
2095 2095 elapsed = 0.001
2096 2096 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2097 2097 (util.bytecount(total_bytes), elapsed,
2098 2098 util.bytecount(total_bytes / elapsed)))
2099 2099 self.invalidate()
2100 2100 return len(self.heads()) + 1
2101 2101
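# Layout of the stream parsed above (reconstructed from this consumer
# code; the producer is remote.stream_out()):
#
#   <status>\n                      "0" ok, "1" forbidden, "2" lock failed
#   <total_files> <total_bytes>\n
#   then for each file:
#   <name>\0<size>\n                followed by exactly <size> raw bytes
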
2102 2102 def clone(self, remote, heads=[], stream=False):
2103 2103 '''clone remote repository.
2104 2104
2105 2105 keyword arguments:
2106 2106 heads: list of revs to clone (forces use of pull)
2107 2107 stream: use streaming clone if possible'''
2108 2108
2109 2109 # now, all clients that can request uncompressed clones can
2110 2110 # read repo formats supported by all servers that can serve
2111 2111 # them.
2112 2112
2113 2113 # if revlog format changes, client will have to check version
2114 2114 # and format flags on "stream" capability, and use
2115 2115 # uncompressed only if compatible.
2116 2116
2117 2117 if stream and not heads and remote.capable('stream'):
2118 2118 return self.stream_in(remote)
2119 2119 return self.pull(remote, heads)
2120 2120
2121 2121 # used to avoid circular references so destructors work
2122 2122 def aftertrans(files):
2123 2123 renamefiles = [tuple(t) for t in files]
2124 2124 def a():
2125 2125 for src, dest in renamefiles:
2126 2126 util.rename(src, dest)
2127 2127 return a
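# Presumed usage (not shown in this excerpt): pass aftertrans(renames)
# as a transaction's post-close callback, e.g. with renames of the form
# [(journalfile, undofile)], so the renames happen when the transaction
# is released without the callback holding a reference back to the repo.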
2128 2128
2129 2129 def instance(ui, path, create):
2130 2130 return localrepository(ui, util.drop_scheme('file', path), create)
2131 2131
2132 2132 def islocal(path):
2133 2133 return True