filecommit: swallow some bits from _commitctx, add _
Matt Mackall
r8401:ca7dc47e default
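In short: filecommit becomes the private _filecommit and absorbs the "flags changed
but content unchanged" bookkeeping that _commitctx previously did inline. The caller
side of the change, condensed from the localrepo.py hunk below:

    fctx = ctx[f]
    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp, changed)
    m1.set(f, fctx.flags())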
@@ -1,800 +1,803
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import nullid, nullrev, short, hex
9 9 from i18n import _
10 10 import ancestor, bdiff, error, util
11 11 import os, errno
12 12
13 13 propertycache = util.propertycache
14 14
15 15 class changectx(object):
16 16 """A changecontext object makes access to data related to a particular
17 17 changeset convenient."""
18 18 def __init__(self, repo, changeid=''):
19 19 """changeid is a revision number, node, or tag"""
20 20 if changeid == '':
21 21 changeid = '.'
22 22 self._repo = repo
23 23 if isinstance(changeid, (long, int)):
24 24 self._rev = changeid
25 25 self._node = self._repo.changelog.node(changeid)
26 26 else:
27 27 self._node = self._repo.lookup(changeid)
28 28 self._rev = self._repo.changelog.rev(self._node)
29 29
30 30 def __str__(self):
31 31 return short(self.node())
32 32
33 33 def __int__(self):
34 34 return self.rev()
35 35
36 36 def __repr__(self):
37 37 return "<changectx %s>" % str(self)
38 38
39 39 def __hash__(self):
40 40 try:
41 41 return hash(self._rev)
42 42 except AttributeError:
43 43 return id(self)
44 44
45 45 def __eq__(self, other):
46 46 try:
47 47 return self._rev == other._rev
48 48 except AttributeError:
49 49 return False
50 50
51 51 def __ne__(self, other):
52 52 return not (self == other)
53 53
54 54 def __nonzero__(self):
55 55 return self._rev != nullrev
56 56
57 57 @propertycache
58 58 def _changeset(self):
59 59 return self._repo.changelog.read(self.node())
60 60
61 61 @propertycache
62 62 def _manifest(self):
63 63 return self._repo.manifest.read(self._changeset[0])
64 64
65 65 @propertycache
66 66 def _manifestdelta(self):
67 67 return self._repo.manifest.readdelta(self._changeset[0])
68 68
69 69 @propertycache
70 70 def _parents(self):
71 71 p = self._repo.changelog.parentrevs(self._rev)
72 72 if p[1] == nullrev:
73 73 p = p[:-1]
74 74 return [changectx(self._repo, x) for x in p]
75 75
76 76 def __contains__(self, key):
77 77 return key in self._manifest
78 78
79 79 def __getitem__(self, key):
80 80 return self.filectx(key)
81 81
82 82 def __iter__(self):
83 83 for f in sorted(self._manifest):
84 84 yield f
85 85
86 86 def changeset(self): return self._changeset
87 87 def manifest(self): return self._manifest
88 88
89 89 def rev(self): return self._rev
90 90 def node(self): return self._node
91 91 def hex(self): return hex(self._node)
92 92 def user(self): return self._changeset[1]
93 93 def date(self): return self._changeset[2]
94 94 def files(self): return self._changeset[3]
95 95 def description(self): return self._changeset[4]
96 96 def branch(self): return self._changeset[5].get("branch")
97 97 def extra(self): return self._changeset[5]
98 98 def tags(self): return self._repo.nodetags(self._node)
99 99
100 100 def parents(self):
101 101 """return contexts for each parent changeset"""
102 102 return self._parents
103 103
104 104 def children(self):
105 105 """return contexts for each child changeset"""
106 106 c = self._repo.changelog.children(self._node)
107 107 return [changectx(self._repo, x) for x in c]
108 108
109 109 def ancestors(self):
110 110 for a in self._repo.changelog.ancestors(self._rev):
111 111 yield changectx(self._repo, a)
112 112
113 113 def descendants(self):
114 114 for d in self._repo.changelog.descendants(self._rev):
115 115 yield changectx(self._repo, d)
116 116
117 117 def _fileinfo(self, path):
118 118 if '_manifest' in self.__dict__:
119 119 try:
120 120 return self._manifest[path], self._manifest.flags(path)
121 121 except KeyError:
122 122 raise error.LookupError(self._node, path,
123 123 _('not found in manifest'))
124 124 if '_manifestdelta' in self.__dict__ or path in self.files():
125 125 if path in self._manifestdelta:
126 126 return self._manifestdelta[path], self._manifestdelta.flags(path)
127 127 node, flag = self._repo.manifest.find(self._changeset[0], path)
128 128 if not node:
129 129 raise error.LookupError(self._node, path,
130 130 _('not found in manifest'))
131 131
132 132 return node, flag
133 133
134 134 def filenode(self, path):
135 135 return self._fileinfo(path)[0]
136 136
137 137 def flags(self, path):
138 138 try:
139 139 return self._fileinfo(path)[1]
140 140 except error.LookupError:
141 141 return ''
142 142
143 143 def filectx(self, path, fileid=None, filelog=None):
144 144 """get a file context from this changeset"""
145 145 if fileid is None:
146 146 fileid = self.filenode(path)
147 147 return filectx(self._repo, path, fileid=fileid,
148 148 changectx=self, filelog=filelog)
149 149
150 150 def ancestor(self, c2):
151 151 """
152 152 return the ancestor context of self and c2
153 153 """
154 154 n = self._repo.changelog.ancestor(self._node, c2._node)
155 155 return changectx(self._repo, n)
156 156
157 157 def walk(self, match):
158 158 fset = set(match.files())
159 159 # for dirstate.walk, files=['.'] means "walk the whole tree".
160 160 # follow that here, too
161 161 fset.discard('.')
162 162 for fn in self:
163 163 for ffn in fset:
164 164 # match if the file is the exact name or a directory
165 165 if ffn == fn or fn.startswith("%s/" % ffn):
166 166 fset.remove(ffn)
167 167 break
168 168 if match(fn):
169 169 yield fn
170 170 for fn in sorted(fset):
171 171 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
172 172 yield fn
173 173
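# A minimal usage sketch of the changectx read API above (not part of this
# file; assumes an open localrepository `repo`, e.g. obtained through
# mercurial.hg.repository):
#
#   ctx = repo['tip']        # changeid may be a rev number, node, or tag
#   for path in ctx:         # iterates the manifest in sorted order
#       fctx = ctx[path]     # __getitem__ delegates to filectx()
#       print path, fctx.size()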
174 174 class filectx(object):
175 175 """A filecontext object makes access to data related to a particular
176 176 filerevision convenient."""
177 177 def __init__(self, repo, path, changeid=None, fileid=None,
178 178 filelog=None, changectx=None):
179 179 """changeid can be a changeset revision, node, or tag.
180 180 fileid can be a file revision or node."""
181 181 self._repo = repo
182 182 self._path = path
183 183
184 184 assert (changeid is not None
185 185 or fileid is not None
186 186 or changectx is not None)
187 187
188 188 if filelog:
189 189 self._filelog = filelog
190 190
191 191 if changeid is not None:
192 192 self._changeid = changeid
193 193 if changectx is not None:
194 194 self._changectx = changectx
195 195 if fileid is not None:
196 196 self._fileid = fileid
197 197
198 198 @propertycache
199 199 def _changectx(self):
200 200 return changectx(self._repo, self._changeid)
201 201
202 202 @propertycache
203 203 def _filelog(self):
204 204 return self._repo.file(self._path)
205 205
206 206 @propertycache
207 207 def _changeid(self):
208 208 if '_changectx' in self.__dict__:
209 209 return self._changectx.rev()
210 210 else:
211 211 return self._filelog.linkrev(self._filerev)
212 212
213 213 @propertycache
214 214 def _filenode(self):
215 215 if '_fileid' in self.__dict__:
216 216 return self._filelog.lookup(self._fileid)
217 217 else:
218 218 return self._changectx.filenode(self._path)
219 219
220 220 @propertycache
221 221 def _filerev(self):
222 222 return self._filelog.rev(self._filenode)
223 223
224 224 @propertycache
225 225 def _repopath(self):
226 226 return self._path
227 227
228 228 def __nonzero__(self):
229 229 try:
230 230 self._filenode
231 231 return True
232 232 except error.LookupError:
233 233 # file is missing
234 234 return False
235 235
236 236 def __str__(self):
237 237 return "%s@%s" % (self.path(), short(self.node()))
238 238
239 239 def __repr__(self):
240 240 return "<filectx %s>" % str(self)
241 241
242 242 def __hash__(self):
243 243 try:
244 244 return hash((self._path, self._fileid))
245 245 except AttributeError:
246 246 return id(self)
247 247
248 248 def __eq__(self, other):
249 249 try:
250 250 return (self._path == other._path
251 251 and self._fileid == other._fileid)
252 252 except AttributeError:
253 253 return False
254 254
255 255 def __ne__(self, other):
256 256 return not (self == other)
257 257
258 258 def filectx(self, fileid):
259 259 '''opens an arbitrary revision of the file without
260 260 opening a new filelog'''
261 261 return filectx(self._repo, self._path, fileid=fileid,
262 262 filelog=self._filelog)
263 263
264 264 def filerev(self): return self._filerev
265 265 def filenode(self): return self._filenode
266 266 def flags(self): return self._changectx.flags(self._path)
267 267 def filelog(self): return self._filelog
268 268
269 269 def rev(self):
270 270 if '_changectx' in self.__dict__:
271 271 return self._changectx.rev()
272 272 if '_changeid' in self.__dict__:
273 273 return self._changectx.rev()
274 274 return self._filelog.linkrev(self._filerev)
275 275
276 276 def linkrev(self): return self._filelog.linkrev(self._filerev)
277 277 def node(self): return self._changectx.node()
278 278 def user(self): return self._changectx.user()
279 279 def date(self): return self._changectx.date()
280 280 def files(self): return self._changectx.files()
281 281 def description(self): return self._changectx.description()
282 282 def branch(self): return self._changectx.branch()
283 283 def manifest(self): return self._changectx.manifest()
284 284 def changectx(self): return self._changectx
285 285
286 286 def data(self): return self._filelog.read(self._filenode)
287 287 def path(self): return self._path
288 288 def size(self): return self._filelog.size(self._filerev)
289 289
290 290 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
291 291
292 292 def renamed(self):
293 293 """check if file was actually renamed in this changeset revision
294 294
295 295 If a rename is logged in the file revision, we report the copy for
296 296 the changeset only if the file revision's linkrev points back to the
297 297 changeset in question or both changeset parents contain different file revisions.
298 298 """
299 299
300 300 renamed = self._filelog.renamed(self._filenode)
301 301 if not renamed:
302 302 return renamed
303 303
304 304 if self.rev() == self.linkrev():
305 305 return renamed
306 306
307 307 name = self.path()
308 308 fnode = self._filenode
309 309 for p in self._changectx.parents():
310 310 try:
311 311 if fnode == p.filenode(name):
312 312 return None
313 313 except error.LookupError:
314 314 pass
315 315 return renamed
316 316
317 317 def parents(self):
318 318 p = self._path
319 319 fl = self._filelog
320 320 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
321 321
322 322 r = self._filelog.renamed(self._filenode)
323 323 if r:
324 324 pl[0] = (r[0], r[1], None)
325 325
326 326 return [filectx(self._repo, p, fileid=n, filelog=l)
327 327 for p,n,l in pl if n != nullid]
328 328
329 329 def children(self):
330 330 # hard for renames
331 331 c = self._filelog.children(self._filenode)
332 332 return [filectx(self._repo, self._path, fileid=x,
333 333 filelog=self._filelog) for x in c]
334 334
335 335 def annotate(self, follow=False, linenumber=None):
336 336 '''returns a list of (ctx, line) tuples for each line
337 337 in the file, where ctx is the filectx of the node where
338 338 that line was last changed.
339 339 If the "linenumber" parameter is not None, this instead
340 340 returns tuples of ((ctx, linenumber), line), where
341 341 linenumber is the line's number at its first appearance
342 342 in the managed file.
343 343 To reduce annotation cost, a fixed value (False) is
344 344 returned as the linenumber if the "linenumber"
345 345 parameter is False.'''
346 346
347 347 def decorate_compat(text, rev):
348 348 return ([rev] * len(text.splitlines()), text)
349 349
350 350 def without_linenumber(text, rev):
351 351 return ([(rev, False)] * len(text.splitlines()), text)
352 352
353 353 def with_linenumber(text, rev):
354 354 size = len(text.splitlines())
355 355 return ([(rev, i) for i in xrange(1, size + 1)], text)
356 356
357 357 decorate = (((linenumber is None) and decorate_compat) or
358 358 (linenumber and with_linenumber) or
359 359 without_linenumber)
360 360
361 361 def pair(parent, child):
362 362 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
363 363 child[0][b1:b2] = parent[0][a1:a2]
364 364 return child
365 365
366 366 getlog = util.cachefunc(lambda x: self._repo.file(x))
367 367 def getctx(path, fileid):
368 368 log = path == self._path and self._filelog or getlog(path)
369 369 return filectx(self._repo, path, fileid=fileid, filelog=log)
370 370 getctx = util.cachefunc(getctx)
371 371
372 372 def parents(f):
373 373 # we want to reuse filectx objects as much as possible
374 374 p = f._path
375 375 if f._filerev is None: # working dir
376 376 pl = [(n.path(), n.filerev()) for n in f.parents()]
377 377 else:
378 378 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
379 379
380 380 if follow:
381 381 r = f.renamed()
382 382 if r:
383 383 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
384 384
385 385 return [getctx(p, n) for p, n in pl if n != nullrev]
386 386
387 387 # use linkrev to find the first changeset where self appeared
388 388 if self.rev() != self.linkrev():
389 389 base = self.filectx(self.filerev())
390 390 else:
391 391 base = self
392 392
393 393 # find all ancestors
394 394 needed = {base: 1}
395 395 visit = [base]
396 396 files = [base._path]
397 397 while visit:
398 398 f = visit.pop(0)
399 399 for p in parents(f):
400 400 if p not in needed:
401 401 needed[p] = 1
402 402 visit.append(p)
403 403 if p._path not in files:
404 404 files.append(p._path)
405 405 else:
406 406 # count how many times we'll use this
407 407 needed[p] += 1
408 408
409 409 # sort by revision (per file) which is a topological order
410 410 visit = []
411 411 for f in files:
412 412 fn = [(n.rev(), n) for n in needed if n._path == f]
413 413 visit.extend(fn)
414 414
415 415 hist = {}
416 416 for r, f in sorted(visit):
417 417 curr = decorate(f.data(), f)
418 418 for p in parents(f):
419 419 if p != nullid:
420 420 curr = pair(hist[p], curr)
421 421 # trim the history of unneeded revs
422 422 needed[p] -= 1
423 423 if not needed[p]:
424 424 del hist[p]
425 425 hist[f] = curr
426 426
427 427 return zip(hist[f][0], hist[f][1].splitlines(1))
428 428
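# A usage sketch for annotate() above (assumes a filectx `fctx`, e.g.
# repo['tip'].filectx('some/file') -- hypothetical path): each pair holds
# the filectx that last changed the line, and with a true "linenumber"
# the first element becomes (ctx, lineno):
#
#   for fc, line in fctx.annotate():
#       print "%4d: %s" % (fc.rev(), line),
#
#   for (fc, lineno), line in fctx.annotate(linenumber=True):
#       print fc.rev(), lineno, line,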
429 429 def ancestor(self, fc2):
430 430 """
431 431 find the common ancestor file context, if any, of self, and fc2
432 432 """
433 433
434 434 acache = {}
435 435
436 436 # prime the ancestor cache for the working directory
437 437 for c in (self, fc2):
438 438 if c._filerev is None:
439 439 pl = [(n.path(), n.filenode()) for n in c.parents()]
440 440 acache[(c._path, None)] = pl
441 441
442 442 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
443 443 def parents(vertex):
444 444 if vertex in acache:
445 445 return acache[vertex]
446 446 f, n = vertex
447 447 if f not in flcache:
448 448 flcache[f] = self._repo.file(f)
449 449 fl = flcache[f]
450 450 pl = [(f, p) for p in fl.parents(n) if p != nullid]
451 451 re = fl.renamed(n)
452 452 if re:
453 453 pl.append(re)
454 454 acache[vertex] = pl
455 455 return pl
456 456
457 457 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
458 458 v = ancestor.ancestor(a, b, parents)
459 459 if v:
460 460 f, n = v
461 461 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
462 462
463 463 return None
464 464
465 465 class workingctx(changectx):
466 466 """A workingctx object makes access to data related to
467 467 the current working directory convenient.
468 468 parents - a pair of parent nodeids, or None to use the dirstate.
469 469 date - any valid date string or (unixtime, offset), or None.
470 470 user - username string, or None.
471 471 extra - a dictionary of extra values, or None.
472 472 changes - a list of file lists as returned by localrepo.status()
473 473 or None to use the repository status.
474 474 """
475 475 def __init__(self, repo, parents=None, text="", user=None, date=None,
476 476 extra=None, changes=None):
477 477 self._repo = repo
478 478 self._rev = None
479 479 self._node = None
480 480 self._text = text
481 481 if date:
482 482 self._date = util.parsedate(date)
483 483 if user:
484 484 self._user = user
485 485 if parents:
486 486 self._parents = [changectx(self._repo, p) for p in parents]
487 487 if changes:
488 488 self._status = list(changes)
489 489
490 490 self._extra = {}
491 491 if extra:
492 492 self._extra = extra.copy()
493 493 if 'branch' not in self._extra:
494 494 branch = self._repo.dirstate.branch()
495 495 try:
496 496 branch = branch.decode('UTF-8').encode('UTF-8')
497 497 except UnicodeDecodeError:
498 498 raise util.Abort(_('branch name not in UTF-8!'))
499 499 self._extra['branch'] = branch
500 500 if self._extra['branch'] == '':
501 501 self._extra['branch'] = 'default'
502 502
503 503 def __str__(self):
504 504 return str(self._parents[0]) + "+"
505 505
506 506 def __nonzero__(self):
507 507 return True
508 508
509 509 def __contains__(self, key):
510 510 return self._repo.dirstate[key] not in "?r"
511 511
512 512 @propertycache
513 513 def _manifest(self):
514 514 """generate a manifest corresponding to the working directory"""
515 515
516 516 man = self._parents[0].manifest().copy()
517 517 copied = self._repo.dirstate.copies()
518 518 cf = lambda x: man.flags(copied.get(x, x))
519 519 ff = self._repo.dirstate.flagfunc(cf)
520 520 modified, added, removed, deleted, unknown = self._status[:5]
521 521 for i, l in (("a", added), ("m", modified), ("u", unknown)):
522 522 for f in l:
523 523 man[f] = man.get(copied.get(f, f), nullid) + i
524 524 try:
525 525 man.set(f, ff(f))
526 526 except OSError:
527 527 pass
528 528
529 529 for f in deleted + removed:
530 530 if f in man:
531 531 del man[f]
532 532
533 533 return man
534 534
535 535 @propertycache
536 536 def _status(self):
537 537 return self._repo.status(unknown=True)
538 538
539 539 @propertycache
540 540 def _user(self):
541 541 return self._repo.ui.username()
542 542
543 543 @propertycache
544 544 def _date(self):
545 545 return util.makedate()
546 546
547 547 @propertycache
548 548 def _parents(self):
549 549 p = self._repo.dirstate.parents()
550 550 if p[1] == nullid:
551 551 p = p[:-1]
552 552 self._parents = [changectx(self._repo, x) for x in p]
553 553 return self._parents
554 554
555 555 def manifest(self): return self._manifest
556 556
557 557 def user(self): return self._user or self._repo.ui.username()
558 558 def date(self): return self._date
559 559 def description(self): return self._text
560 560 def files(self):
561 561 return sorted(self._status[0] + self._status[1] + self._status[2])
562 562
563 563 def modified(self): return self._status[0]
564 564 def added(self): return self._status[1]
565 565 def removed(self): return self._status[2]
566 566 def deleted(self): return self._status[3]
567 567 def unknown(self): return self._status[4]
568 568 def clean(self): return self._status[5]
569 569 def branch(self): return self._extra['branch']
570 570 def extra(self): return self._extra
571 571
572 572 def tags(self):
573 573 t = []
574 574 [t.extend(p.tags()) for p in self.parents()]
575 575 return t
576 576
577 577 def children(self):
578 578 return []
579 579
580 580 def flags(self, path):
581 581 if '_manifest' in self.__dict__:
582 582 try:
583 583 return self._manifest.flags(path)
584 584 except KeyError:
585 585 return ''
586 586
587 587 pnode = self._parents[0].changeset()[0]
588 588 orig = self._repo.dirstate.copies().get(path, path)
589 589 node, flag = self._repo.manifest.find(pnode, orig)
590 590 try:
591 591 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
592 592 return ff(path)
593 593 except OSError:
594 594 pass
595 595
596 596 if not node or path in self.deleted() or path in self.removed():
597 597 return ''
598 598 return flag
599 599
600 600 def filectx(self, path, filelog=None):
601 601 """get a file context from the working directory"""
602 602 return workingfilectx(self._repo, path, workingctx=self,
603 603 filelog=filelog)
604 604
605 605 def ancestor(self, c2):
606 606 """return the ancestor context of self and c2"""
607 607 return self._parents[0].ancestor(c2) # punt on two parents for now
608 608
609 609 def walk(self, match):
610 610 return sorted(self._repo.dirstate.walk(match, True, False))
611 611
612 612 class workingfilectx(filectx):
613 613 """A workingfilectx object makes access to data related to a particular
614 614 file in the working directory convenient."""
615 615 def __init__(self, repo, path, filelog=None, workingctx=None):
616 616 """changeid can be a changeset revision, node, or tag.
617 617 fileid can be a file revision or node."""
618 618 self._repo = repo
619 619 self._path = path
620 620 self._changeid = None
621 621 self._filerev = self._filenode = None
622 622
623 623 if filelog:
624 624 self._filelog = filelog
625 625 if workingctx:
626 626 self._changectx = workingctx
627 627
628 628 @propertycache
629 629 def _changectx(self):
630 630 return workingctx(self._repo)
631 631
632 632 @propertycache
633 633 def _repopath(self):
634 634 return self._repo.dirstate.copied(self._path) or self._path
635 635
636 636 @propertycache
637 637 def _filelog(self):
638 638 return self._repo.file(self._repopath)
639 639
640 640 def __nonzero__(self):
641 641 return True
642 642
643 643 def __str__(self):
644 644 return "%s@%s" % (self.path(), self._changectx)
645 645
646 646 def filectx(self, fileid):
647 647 '''opens an arbitrary revision of the file without
648 648 opening a new filelog'''
649 649 return filectx(self._repo, self._repopath, fileid=fileid,
650 650 filelog=self._filelog)
651 651
652 652 def rev(self):
653 653 if '_changectx' in self.__dict__:
654 654 return self._changectx.rev()
655 655 return self._filelog.linkrev(self._filerev)
656 656
657 657 def data(self): return self._repo.wread(self._path)
658 658 def renamed(self):
659 659 rp = self._repopath
660 660 if rp == self._path:
661 661 return None
662 662 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
663 663
664 664 def parents(self):
665 665 '''return parent filectxs, following copies if necessary'''
666 666 p = self._path
667 667 rp = self._repopath
668 668 pcl = self._changectx._parents
669 669 fl = self._filelog
670 670 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
671 671 if len(pcl) > 1:
672 672 if rp != p:
673 673 fl = None
674 674 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
675 675
676 676 return [filectx(self._repo, p, fileid=n, filelog=l)
677 677 for p,n,l in pl if n != nullid]
678 678
679 679 def children(self):
680 680 return []
681 681
682 682 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
683 683 def date(self):
684 684 t, tz = self._changectx.date()
685 685 try:
686 686 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
687 687 except OSError, err:
688 688 if err.errno != errno.ENOENT: raise
689 689 return (t, tz)
690 690
691 691 def cmp(self, text): return self._repo.wread(self._path) == text
692 692
693 693 class memctx(object):
694 694 """Use memctx to perform in-memory commits via localrepo.commitctx().
695 695
696 696 Revision information is supplied at initialization time, while
697 697 related file data is made available through a callback
698 698 mechanism. 'repo' is the current localrepo, 'parents' is a
699 699 sequence of two parent revision identifiers (pass None for every
700 700 missing parent), 'text' is the commit message and 'files' lists
701 701 names of files touched by the revision (normalized and relative to
702 702 repository root).
703 703
704 704 filectxfn(repo, memctx, path) is a callable receiving the
705 705 repository, the current memctx object and the normalized path of
706 706 requested file, relative to repository root. It is fired by the
707 707 commit function for every file in 'files', but the order of calls is
708 708 undefined. If the file is available in the revision being
709 709 committed (updated or added), filectxfn returns a memfilectx
710 710 object. If the file was removed, filectxfn raises an
711 711 IOError. Moved files are represented by marking the source file
712 712 removed and the new file added with copy information (see
713 713 memfilectx).
714 714
715 715 'user' receives the committer name and defaults to the current
716 716 repository username; 'date' is the commit date in any format
717 717 supported by util.parsedate() and defaults to the current date;
718 718 'extra' is a dictionary of metadata, or is left empty.
719 719 """
720 720 def __init__(self, repo, parents, text, files, filectxfn, user=None,
721 721 date=None, extra=None):
722 722 self._repo = repo
723 723 self._rev = None
724 724 self._node = None
725 725 self._text = text
726 726 self._date = date and util.parsedate(date) or util.makedate()
727 727 self._user = user
728 728 parents = [(p or nullid) for p in parents]
729 729 p1, p2 = parents
730 730 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
731 731 files = sorted(set(files))
732 732 self._status = [files, [], [], [], []]
733 733 self._filectxfn = filectxfn
734 734
735 735 self._extra = extra and extra.copy() or {}
736 736 if 'branch' not in self._extra:
737 737 self._extra['branch'] = 'default'
738 738 elif self._extra.get('branch') == '':
739 739 self._extra['branch'] = 'default'
740 740
741 741 def __str__(self):
742 742 return str(self._parents[0]) + "+"
743 743
744 744 def __int__(self):
745 745 return self._rev
746 746
747 747 def __nonzero__(self):
748 748 return True
749 749
750 def __getitem__(self, key):
751 return self.filectx(key)
752
750 753 def user(self): return self._user or self._repo.ui.username()
751 754 def date(self): return self._date
752 755 def description(self): return self._text
753 756 def files(self): return self.modified()
754 757 def modified(self): return self._status[0]
755 758 def added(self): return self._status[1]
756 759 def removed(self): return self._status[2]
757 760 def deleted(self): return self._status[3]
758 761 def unknown(self): return self._status[4]
759 762 def clean(self): return self._status[5]
760 763 def branch(self): return self._extra['branch']
761 764 def extra(self): return self._extra
762 765 def flags(self, f): return self[f].flags()
763 766
764 767 def parents(self):
765 768 """return contexts for each parent changeset"""
766 769 return self._parents
767 770
768 771 def filectx(self, path, filelog=None):
769 772 """get a file context from the working directory"""
770 773 return self._filectxfn(self._repo, self, path)
771 774
772 775 class memfilectx(object):
773 776 """memfilectx represents an in-memory file to commit.
774 777
775 778 See memctx for more details.
776 779 """
777 780 def __init__(self, path, data, islink, isexec, copied):
778 781 """
779 782 path is the normalized file path relative to repository root.
780 783 data is the file content as a string.
781 784 islink is True if the file is a symbolic link.
782 785 isexec is True if the file is executable.
783 786 copied is the source file path if current file was copied in the
784 787 revision being committed, or None."""
785 788 self._path = path
786 789 self._data = data
787 790 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
788 791 self._copied = None
789 792 if copied:
790 793 self._copied = (copied, nullid)
791 794
792 795 def __nonzero__(self): return True
793 796 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
794 797 def path(self): return self._path
795 798 def data(self): return self._data
796 799 def flags(self): return self._flags
797 800 def isexec(self): return 'x' in self._flags
798 801 def islink(self): return 'l' in self._flags
799 802 def renamed(self): return self._copied
800 803
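A usage sketch of the memctx/memfilectx API documented above, driving
localrepo.commitctx() from the second file of this change. The repository
path, file name, and contents are hypothetical; only the signatures come
from the code:

    from mercurial import context, hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')

    def filectxfn(repo, mctx, path):
        # return the in-memory file; a removed file would raise IOError
        return context.memfilectx(path, 'new contents\n',
                                  islink=False, isexec=False, copied=None)

    parents = (repo['.'].node(), None)  # None stands in for a missing parent
    mctx = context.memctx(repo, parents, 'example message', ['a.txt'],
                          filectxfn, user=None, date=None)
    node = repo.commitctx(mctx)         # never touches the working directory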
@@ -1,2154 +1,2151
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 from lock import release
17 17 import weakref, stat, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19
20 20 class localrepository(repo.repository):
21 21 capabilities = set(('lookup', 'changegroupsubset'))
22 22 supported = set('revlogv1 store fncache'.split())
23 23
24 24 def __init__(self, baseui, path=None, create=0):
25 25 repo.repository.__init__(self)
26 26 self.root = os.path.realpath(path)
27 27 self.path = os.path.join(self.root, ".hg")
28 28 self.origroot = path
29 29 self.opener = util.opener(self.path)
30 30 self.wopener = util.opener(self.root)
31 31
32 32 if not os.path.isdir(self.path):
33 33 if create:
34 34 if not os.path.exists(path):
35 35 os.mkdir(path)
36 36 os.mkdir(self.path)
37 37 requirements = ["revlogv1"]
38 38 if baseui.configbool('format', 'usestore', True):
39 39 os.mkdir(os.path.join(self.path, "store"))
40 40 requirements.append("store")
41 41 if baseui.configbool('format', 'usefncache', True):
42 42 requirements.append("fncache")
43 43 # create an invalid changelog
44 44 self.opener("00changelog.i", "a").write(
45 45 '\0\0\0\2' # represents revlogv2
46 46 ' dummy changelog to prevent using the old repo layout'
47 47 )
48 48 reqfile = self.opener("requires", "w")
49 49 for r in requirements:
50 50 reqfile.write("%s\n" % r)
51 51 reqfile.close()
52 52 else:
53 53 raise error.RepoError(_("repository %s not found") % path)
54 54 elif create:
55 55 raise error.RepoError(_("repository %s already exists") % path)
56 56 else:
57 57 # find requirements
58 58 requirements = set()
59 59 try:
60 60 requirements = set(self.opener("requires").read().splitlines())
61 61 except IOError, inst:
62 62 if inst.errno != errno.ENOENT:
63 63 raise
64 64 for r in requirements - self.supported:
65 65 raise error.RepoError(_("requirement '%s' not supported") % r)
66 66
67 67 self.store = store.store(requirements, self.path, util.opener)
68 68 self.spath = self.store.path
69 69 self.sopener = self.store.opener
70 70 self.sjoin = self.store.join
71 71 self.opener.createmode = self.store.createmode
72 72
73 73 self.baseui = baseui
74 74 self.ui = baseui.copy()
75 75 try:
76 76 self.ui.readconfig(self.join("hgrc"), self.root)
77 77 extensions.loadall(self.ui)
78 78 except IOError:
79 79 pass
80 80
81 81 self.tagscache = None
82 82 self._tagstypecache = None
83 83 self.branchcache = None
84 84 self._ubranchcache = None # UTF-8 version of branchcache
85 85 self._branchcachetip = None
86 86 self.nodetagscache = None
87 87 self.filterpats = {}
88 88 self._datafilters = {}
89 89 self._transref = self._lockref = self._wlockref = None
90 90
91 91 @propertycache
92 92 def changelog(self):
93 93 c = changelog.changelog(self.sopener)
94 94 if 'HG_PENDING' in os.environ:
95 95 p = os.environ['HG_PENDING']
96 96 if p.startswith(self.root):
97 97 c.readpending('00changelog.i.a')
98 98 self.sopener.defversion = c.version
99 99 return c
100 100
101 101 @propertycache
102 102 def manifest(self):
103 103 return manifest.manifest(self.sopener)
104 104
105 105 @propertycache
106 106 def dirstate(self):
107 107 return dirstate.dirstate(self.opener, self.ui, self.root)
108 108
109 109 def __getitem__(self, changeid):
110 110 if changeid is None:
111 111 return context.workingctx(self)
112 112 return context.changectx(self, changeid)
113 113
114 114 def __nonzero__(self):
115 115 return True
116 116
117 117 def __len__(self):
118 118 return len(self.changelog)
119 119
120 120 def __iter__(self):
121 121 for i in xrange(len(self)):
122 122 yield i
123 123
124 124 def url(self):
125 125 return 'file:' + self.root
126 126
127 127 def hook(self, name, throw=False, **args):
128 128 return hook.hook(self.ui, self, name, throw, **args)
129 129
130 130 tag_disallowed = ':\r\n'
131 131
132 132 def _tag(self, names, node, message, local, user, date, parent=None,
133 133 extra={}):
134 134 use_dirstate = parent is None
135 135
136 136 if isinstance(names, str):
137 137 allchars = names
138 138 names = (names,)
139 139 else:
140 140 allchars = ''.join(names)
141 141 for c in self.tag_disallowed:
142 142 if c in allchars:
143 143 raise util.Abort(_('%r cannot be used in a tag name') % c)
144 144
145 145 for name in names:
146 146 self.hook('pretag', throw=True, node=hex(node), tag=name,
147 147 local=local)
148 148
149 149 def writetags(fp, names, munge, prevtags):
150 150 fp.seek(0, 2)
151 151 if prevtags and prevtags[-1] != '\n':
152 152 fp.write('\n')
153 153 for name in names:
154 154 m = munge and munge(name) or name
155 155 if self._tagstypecache and name in self._tagstypecache:
156 156 old = self.tagscache.get(name, nullid)
157 157 fp.write('%s %s\n' % (hex(old), m))
158 158 fp.write('%s %s\n' % (hex(node), m))
159 159 fp.close()
160 160
161 161 prevtags = ''
162 162 if local:
163 163 try:
164 164 fp = self.opener('localtags', 'r+')
165 165 except IOError:
166 166 fp = self.opener('localtags', 'a')
167 167 else:
168 168 prevtags = fp.read()
169 169
170 170 # local tags are stored in the current charset
171 171 writetags(fp, names, None, prevtags)
172 172 for name in names:
173 173 self.hook('tag', node=hex(node), tag=name, local=local)
174 174 return
175 175
176 176 if use_dirstate:
177 177 try:
178 178 fp = self.wfile('.hgtags', 'rb+')
179 179 except IOError:
180 180 fp = self.wfile('.hgtags', 'ab')
181 181 else:
182 182 prevtags = fp.read()
183 183 else:
184 184 try:
185 185 prevtags = self.filectx('.hgtags', parent).data()
186 186 except error.LookupError:
187 187 pass
188 188 fp = self.wfile('.hgtags', 'wb')
189 189 if prevtags:
190 190 fp.write(prevtags)
191 191
192 192 # committed tags are stored in UTF-8
193 193 writetags(fp, names, encoding.fromlocal, prevtags)
194 194
195 195 if use_dirstate and '.hgtags' not in self.dirstate:
196 196 self.add(['.hgtags'])
197 197
198 198 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
199 199 extra=extra)
200 200
201 201 for name in names:
202 202 self.hook('tag', node=hex(node), tag=name, local=local)
203 203
204 204 return tagnode
205 205
206 206 def tag(self, names, node, message, local, user, date):
207 207 '''tag a revision with one or more symbolic names.
208 208
209 209 names is a list of strings or, when adding a single tag, names may be a
210 210 string.
211 211
212 212 if local is True, the tags are stored in a per-repository file.
213 213 otherwise, they are stored in the .hgtags file, and a new
214 214 changeset is committed with the change.
215 215
216 216 keyword arguments:
217 217
218 218 local: whether to store tags in non-version-controlled file
219 219 (default False)
220 220
221 221 message: commit message to use if committing
222 222
223 223 user: name of user to use if committing
224 224
225 225 date: date tuple to use if committing'''
226 226
227 227 for x in self.status()[:5]:
228 228 if '.hgtags' in x:
229 229 raise util.Abort(_('working copy of .hgtags is changed '
230 230 '(please commit .hgtags manually)'))
231 231
232 232 self.tags() # instantiate the cache
233 233 self._tag(names, node, message, local, user, date)
234 234
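# Call sketch for tag() above (hypothetical tag name and user): tag the
# current tip locally, then globally with a commit.
#
#   repo.tag('v1.0', repo.lookup('tip'), 'Added tag v1.0', True, None, None)
#   repo.tag(['v1.0'], repo.lookup('tip'), 'Added tag v1.0', False,
#            'user@example.com', None)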
235 235 def tags(self):
236 236 '''return a mapping of tag to node'''
237 237 if self.tagscache:
238 238 return self.tagscache
239 239
240 240 globaltags = {}
241 241 tagtypes = {}
242 242
243 243 def readtags(lines, fn, tagtype):
244 244 filetags = {}
245 245 count = 0
246 246
247 247 def warn(msg):
248 248 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
249 249
250 250 for l in lines:
251 251 count += 1
252 252 if not l:
253 253 continue
254 254 s = l.split(" ", 1)
255 255 if len(s) != 2:
256 256 warn(_("cannot parse entry"))
257 257 continue
258 258 node, key = s
259 259 key = encoding.tolocal(key.strip()) # stored in UTF-8
260 260 try:
261 261 bin_n = bin(node)
262 262 except TypeError:
263 263 warn(_("node '%s' is not well formed") % node)
264 264 continue
265 265 if bin_n not in self.changelog.nodemap:
266 266 warn(_("tag '%s' refers to unknown node") % key)
267 267 continue
268 268
269 269 h = []
270 270 if key in filetags:
271 271 n, h = filetags[key]
272 272 h.append(n)
273 273 filetags[key] = (bin_n, h)
274 274
275 275 for k, nh in filetags.iteritems():
276 276 if k not in globaltags:
277 277 globaltags[k] = nh
278 278 tagtypes[k] = tagtype
279 279 continue
280 280
281 281 # we prefer the global tag if:
282 282 # it supersedes us OR
283 283 # each supersedes the other and it has a higher rank
284 284 # otherwise we win because we're tip-most
285 285 an, ah = nh
286 286 bn, bh = globaltags[k]
287 287 if (bn != an and an in bh and
288 288 (bn not in ah or len(bh) > len(ah))):
289 289 an = bn
290 290 ah.extend([n for n in bh if n not in ah])
291 291 globaltags[k] = an, ah
292 292 tagtypes[k] = tagtype
293 293
294 294 # read the tags file from each head, ending with the tip
295 295 f = None
296 296 for rev, node, fnode in self._hgtagsnodes():
297 297 f = (f and f.filectx(fnode) or
298 298 self.filectx('.hgtags', fileid=fnode))
299 299 readtags(f.data().splitlines(), f, "global")
300 300
301 301 try:
302 302 data = encoding.fromlocal(self.opener("localtags").read())
303 303 # localtags are stored in the local character set
304 304 # while the internal tag table is stored in UTF-8
305 305 readtags(data.splitlines(), "localtags", "local")
306 306 except IOError:
307 307 pass
308 308
309 309 self.tagscache = {}
310 310 self._tagstypecache = {}
311 311 for k, nh in globaltags.iteritems():
312 312 n = nh[0]
313 313 if n != nullid:
314 314 self.tagscache[k] = n
315 315 self._tagstypecache[k] = tagtypes[k]
316 316 self.tagscache['tip'] = self.changelog.tip()
317 317 return self.tagscache
318 318
319 319 def tagtype(self, tagname):
320 320 '''
321 321 return the type of the given tag. result can be:
322 322
323 323 'local' : a local tag
324 324 'global' : a global tag
325 325 None : tag does not exist
326 326 '''
327 327
328 328 self.tags()
329 329
330 330 return self._tagstypecache.get(tagname)
331 331
332 332 def _hgtagsnodes(self):
333 333 last = {}
334 334 ret = []
335 335 for node in reversed(self.heads()):
336 336 c = self[node]
337 337 rev = c.rev()
338 338 try:
339 339 fnode = c.filenode('.hgtags')
340 340 except error.LookupError:
341 341 continue
342 342 ret.append((rev, node, fnode))
343 343 if fnode in last:
344 344 ret[last[fnode]] = None
345 345 last[fnode] = len(ret) - 1
346 346 return [item for item in ret if item]
347 347
348 348 def tagslist(self):
349 349 '''return a list of tags ordered by revision'''
350 350 l = []
351 351 for t, n in self.tags().iteritems():
352 352 try:
353 353 r = self.changelog.rev(n)
354 354 except:
355 355 r = -2 # sort to the beginning of the list if unknown
356 356 l.append((r, t, n))
357 357 return [(t, n) for r, t, n in sorted(l)]
358 358
359 359 def nodetags(self, node):
360 360 '''return the tags associated with a node'''
361 361 if not self.nodetagscache:
362 362 self.nodetagscache = {}
363 363 for t, n in self.tags().iteritems():
364 364 self.nodetagscache.setdefault(n, []).append(t)
365 365 return self.nodetagscache.get(node, [])
366 366
367 367 def _branchtags(self, partial, lrev):
368 368 # TODO: rename this function?
369 369 tiprev = len(self) - 1
370 370 if lrev != tiprev:
371 371 self._updatebranchcache(partial, lrev+1, tiprev+1)
372 372 self._writebranchcache(partial, self.changelog.tip(), tiprev)
373 373
374 374 return partial
375 375
376 376 def _branchheads(self):
377 377 tip = self.changelog.tip()
378 378 if self.branchcache is not None and self._branchcachetip == tip:
379 379 return self.branchcache
380 380
381 381 oldtip = self._branchcachetip
382 382 self._branchcachetip = tip
383 383 if self.branchcache is None:
384 384 self.branchcache = {} # avoid recursion in changectx
385 385 else:
386 386 self.branchcache.clear() # keep using the same dict
387 387 if oldtip is None or oldtip not in self.changelog.nodemap:
388 388 partial, last, lrev = self._readbranchcache()
389 389 else:
390 390 lrev = self.changelog.rev(oldtip)
391 391 partial = self._ubranchcache
392 392
393 393 self._branchtags(partial, lrev)
394 394 # this private cache holds all heads (not just tips)
395 395 self._ubranchcache = partial
396 396
397 397 # the branch cache is stored on disk as UTF-8, but in the local
398 398 # charset internally
399 399 for k, v in partial.iteritems():
400 400 self.branchcache[encoding.tolocal(k)] = v
401 401 return self.branchcache
402 402
403 403
404 404 def branchtags(self):
405 405 '''return a dict where branch names map to the tipmost head of
406 406 the branch; open heads come before closed'''
407 407 bt = {}
408 408 for bn, heads in self._branchheads().iteritems():
409 409 head = None
410 410 for i in range(len(heads)-1, -1, -1):
411 411 h = heads[i]
412 412 if 'close' not in self.changelog.read(h)[5]:
413 413 head = h
414 414 break
415 415 # no open heads were found
416 416 if head is None:
417 417 head = heads[-1]
418 418 bt[bn] = head
419 419 return bt
420 420
421 421
422 422 def _readbranchcache(self):
423 423 partial = {}
424 424 try:
425 425 f = self.opener("branchheads.cache")
426 426 lines = f.read().split('\n')
427 427 f.close()
428 428 except (IOError, OSError):
429 429 return {}, nullid, nullrev
430 430
431 431 try:
432 432 last, lrev = lines.pop(0).split(" ", 1)
433 433 last, lrev = bin(last), int(lrev)
434 434 if lrev >= len(self) or self[lrev].node() != last:
435 435 # invalidate the cache
436 436 raise ValueError('invalidating branch cache (tip differs)')
437 437 for l in lines:
438 438 if not l: continue
439 439 node, label = l.split(" ", 1)
440 440 partial.setdefault(label.strip(), []).append(bin(node))
441 441 except KeyboardInterrupt:
442 442 raise
443 443 except Exception, inst:
444 444 if self.ui.debugflag:
445 445 self.ui.warn(str(inst), '\n')
446 446 partial, last, lrev = {}, nullid, nullrev
447 447 return partial, last, lrev
448 448
449 449 def _writebranchcache(self, branches, tip, tiprev):
450 450 try:
451 451 f = self.opener("branchheads.cache", "w", atomictemp=True)
452 452 f.write("%s %s\n" % (hex(tip), tiprev))
453 453 for label, nodes in branches.iteritems():
454 454 for node in nodes:
455 455 f.write("%s %s\n" % (hex(node), label))
456 456 f.rename()
457 457 except (IOError, OSError):
458 458 pass
459 459
460 460 def _updatebranchcache(self, partial, start, end):
461 461 for r in xrange(start, end):
462 462 c = self[r]
463 463 b = c.branch()
464 464 bheads = partial.setdefault(b, [])
465 465 bheads.append(c.node())
466 466 for p in c.parents():
467 467 pn = p.node()
468 468 if pn in bheads:
469 469 bheads.remove(pn)
470 470
471 471 def lookup(self, key):
472 472 if isinstance(key, int):
473 473 return self.changelog.node(key)
474 474 elif key == '.':
475 475 return self.dirstate.parents()[0]
476 476 elif key == 'null':
477 477 return nullid
478 478 elif key == 'tip':
479 479 return self.changelog.tip()
480 480 n = self.changelog._match(key)
481 481 if n:
482 482 return n
483 483 if key in self.tags():
484 484 return self.tags()[key]
485 485 if key in self.branchtags():
486 486 return self.branchtags()[key]
487 487 n = self.changelog._partialmatch(key)
488 488 if n:
489 489 return n
490 490 try:
491 491 if len(key) == 20:
492 492 key = hex(key)
493 493 except:
494 494 pass
495 495 raise error.RepoError(_("unknown revision '%s'") % key)
496 496
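# Resolution order sketch for lookup() above: integer rev, '.', 'null',
# 'tip', exact node, tags, branch names, then an unambiguous hex prefix.
#
#   repo.lookup(0)           # revision number -> node
#   repo.lookup('.')         # first dirstate parent
#   repo.lookup('default')   # branch tip via branchtags()
#   repo.lookup('ca7dc47e')  # unambiguous prefix via _partialmatch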
497 497 def local(self):
498 498 return True
499 499
500 500 def join(self, f):
501 501 return os.path.join(self.path, f)
502 502
503 503 def wjoin(self, f):
504 504 return os.path.join(self.root, f)
505 505
506 506 def rjoin(self, f):
507 507 return os.path.join(self.root, util.pconvert(f))
508 508
509 509 def file(self, f):
510 510 if f[0] == '/':
511 511 f = f[1:]
512 512 return filelog.filelog(self.sopener, f)
513 513
514 514 def changectx(self, changeid):
515 515 return self[changeid]
516 516
517 517 def parents(self, changeid=None):
518 518 '''get list of changectxs for parents of changeid'''
519 519 return self[changeid].parents()
520 520
521 521 def filectx(self, path, changeid=None, fileid=None):
522 522 """changeid can be a changeset revision, node, or tag.
523 523 fileid can be a file revision or node."""
524 524 return context.filectx(self, path, changeid, fileid)
525 525
526 526 def getcwd(self):
527 527 return self.dirstate.getcwd()
528 528
529 529 def pathto(self, f, cwd=None):
530 530 return self.dirstate.pathto(f, cwd)
531 531
532 532 def wfile(self, f, mode='r'):
533 533 return self.wopener(f, mode)
534 534
535 535 def _link(self, f):
536 536 return os.path.islink(self.wjoin(f))
537 537
538 538 def _filter(self, filter, filename, data):
539 539 if filter not in self.filterpats:
540 540 l = []
541 541 for pat, cmd in self.ui.configitems(filter):
542 542 if cmd == '!':
543 543 continue
544 544 mf = util.matcher(self.root, "", [pat], [], [])[1]
545 545 fn = None
546 546 params = cmd
547 547 for name, filterfn in self._datafilters.iteritems():
548 548 if cmd.startswith(name):
549 549 fn = filterfn
550 550 params = cmd[len(name):].lstrip()
551 551 break
552 552 if not fn:
553 553 fn = lambda s, c, **kwargs: util.filter(s, c)
554 554 # Wrap old filters not supporting keyword arguments
555 555 if not inspect.getargspec(fn)[2]:
556 556 oldfn = fn
557 557 fn = lambda s, c, **kwargs: oldfn(s, c)
558 558 l.append((mf, fn, params))
559 559 self.filterpats[filter] = l
560 560
561 561 for mf, fn, cmd in self.filterpats[filter]:
562 562 if mf(filename):
563 563 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
564 564 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
565 565 break
566 566
567 567 return data
568 568
569 569 def adddatafilter(self, name, filter):
570 570 self._datafilters[name] = filter
571 571
572 572 def wread(self, filename):
573 573 if self._link(filename):
574 574 data = os.readlink(self.wjoin(filename))
575 575 else:
576 576 data = self.wopener(filename, 'r').read()
577 577 return self._filter("encode", filename, data)
578 578
579 579 def wwrite(self, filename, data, flags):
580 580 data = self._filter("decode", filename, data)
581 581 try:
582 582 os.unlink(self.wjoin(filename))
583 583 except OSError:
584 584 pass
585 585 if 'l' in flags:
586 586 self.wopener.symlink(data, filename)
587 587 else:
588 588 self.wopener(filename, 'w').write(data)
589 589 if 'x' in flags:
590 590 util.set_flags(self.wjoin(filename), False, True)
591 591
592 592 def wwritedata(self, filename, data):
593 593 return self._filter("decode", filename, data)
594 594
595 595 def transaction(self):
596 596 tr = self._transref and self._transref() or None
597 597 if tr and tr.running():
598 598 return tr.nest()
599 599
600 600 # abort here if the journal already exists
601 601 if os.path.exists(self.sjoin("journal")):
602 602 raise error.RepoError(_("journal already exists - run hg recover"))
603 603
604 604 # save dirstate for rollback
605 605 try:
606 606 ds = self.opener("dirstate").read()
607 607 except IOError:
608 608 ds = ""
609 609 self.opener("journal.dirstate", "w").write(ds)
610 610 self.opener("journal.branch", "w").write(self.dirstate.branch())
611 611
612 612 renames = [(self.sjoin("journal"), self.sjoin("undo")),
613 613 (self.join("journal.dirstate"), self.join("undo.dirstate")),
614 614 (self.join("journal.branch"), self.join("undo.branch"))]
615 615 tr = transaction.transaction(self.ui.warn, self.sopener,
616 616 self.sjoin("journal"),
617 617 aftertrans(renames),
618 618 self.store.createmode)
619 619 self._transref = weakref.ref(tr)
620 620 return tr
621 621
622 622 def recover(self):
623 623 lock = self.lock()
624 624 try:
625 625 if os.path.exists(self.sjoin("journal")):
626 626 self.ui.status(_("rolling back interrupted transaction\n"))
627 627 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
628 628 self.invalidate()
629 629 return True
630 630 else:
631 631 self.ui.warn(_("no interrupted transaction available\n"))
632 632 return False
633 633 finally:
634 634 lock.release()
635 635
636 636 def rollback(self):
637 637 wlock = lock = None
638 638 try:
639 639 wlock = self.wlock()
640 640 lock = self.lock()
641 641 if os.path.exists(self.sjoin("undo")):
642 642 self.ui.status(_("rolling back last transaction\n"))
643 643 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
644 644 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
645 645 try:
646 646 branch = self.opener("undo.branch").read()
647 647 self.dirstate.setbranch(branch)
648 648 except IOError:
649 649 self.ui.warn(_("Named branch could not be reset, "
650 650 "current branch still is: %s\n")
651 651 % encoding.tolocal(self.dirstate.branch()))
652 652 self.invalidate()
653 653 self.dirstate.invalidate()
654 654 else:
655 655 self.ui.warn(_("no rollback information available\n"))
656 656 finally:
657 657 release(lock, wlock)
658 658
659 659 def invalidate(self):
660 660 for a in "changelog manifest".split():
661 661 if a in self.__dict__:
662 662 delattr(self, a)
663 663 self.tagscache = None
664 664 self._tagstypecache = None
665 665 self.nodetagscache = None
666 666 self.branchcache = None
667 667 self._ubranchcache = None
668 668 self._branchcachetip = None
669 669
670 670 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
671 671 try:
672 672 l = lock.lock(lockname, 0, releasefn, desc=desc)
673 673 except error.LockHeld, inst:
674 674 if not wait:
675 675 raise
676 676 self.ui.warn(_("waiting for lock on %s held by %r\n") %
677 677 (desc, inst.locker))
678 678 # default to 600 seconds timeout
679 679 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
680 680 releasefn, desc=desc)
681 681 if acquirefn:
682 682 acquirefn()
683 683 return l
684 684
685 685 def lock(self, wait=True):
686 686 l = self._lockref and self._lockref()
687 687 if l is not None and l.held:
688 688 l.lock()
689 689 return l
690 690
691 691 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
692 692 _('repository %s') % self.origroot)
693 693 self._lockref = weakref.ref(l)
694 694 return l
695 695
696 696 def wlock(self, wait=True):
697 697 l = self._wlockref and self._wlockref()
698 698 if l is not None and l.held:
699 699 l.lock()
700 700 return l
701 701
702 702 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
703 703 self.dirstate.invalidate, _('working directory of %s') %
704 704 self.origroot)
705 705 self._wlockref = weakref.ref(l)
706 706 return l
707 707
708 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
708 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
709 709 """
710 710 commit an individual file as part of a larger transaction
711 711 """
712 712
713 713 fname = fctx.path()
714 714 text = fctx.data()
715 715 flog = self.file(fname)
716 716 fparent1 = manifest1.get(fname, nullid)
717 fparent2 = manifest2.get(fname, nullid)
717 fparent2 = fparent2o = manifest2.get(fname, nullid)
718 718
719 719 meta = {}
720 720 copy = fctx.renamed()
721 721 if copy and copy[0] != fname:
722 722 # Mark the new revision of this file as a copy of another
723 723 # file. This copy data will effectively act as a parent
724 724 # of this new revision. If this is a merge, the first
725 725 # parent will be the nullid (meaning "look up the copy data")
726 726 # and the second one will be the other parent. For example:
727 727 #
728 728 # 0 --- 1 --- 3 rev1 changes file foo
729 729 # \ / rev2 renames foo to bar and changes it
730 730 # \- 2 -/ rev3 should have bar with all changes and
731 731 # should record that bar descends from
732 732 # bar in rev2 and foo in rev1
733 733 #
734 734 # this allows this merge to succeed:
735 735 #
736 736 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
737 737 # \ / merging rev3 and rev4 should use bar@rev2
738 738 # \- 2 --- 4 as the merge base
739 739 #
740 740
741 741 cfname = copy[0]
742 742 crev = manifest1.get(cfname)
743 743 newfparent = fparent2
744 744
745 745 if manifest2: # branch merge
746 746 if fparent2 == nullid or crev is None: # copied on remote side
747 747 if cfname in manifest2:
748 748 crev = manifest2[cfname]
749 749 newfparent = fparent1
750 750
751 751 # find source in nearest ancestor if we've lost track
752 752 if not crev:
753 753 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
754 754 (fname, cfname))
755 755 for ancestor in self['.'].ancestors():
756 756 if cfname in ancestor:
757 757 crev = ancestor[cfname].filenode()
758 758 break
759 759
760 760 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
761 761 meta["copy"] = cfname
762 762 meta["copyrev"] = hex(crev)
763 763 fparent1, fparent2 = nullid, newfparent
764 764 elif fparent2 != nullid:
765 765 # is one parent an ancestor of the other?
766 766 fparentancestor = flog.ancestor(fparent1, fparent2)
767 767 if fparentancestor == fparent1:
768 768 fparent1, fparent2 = fparent2, nullid
769 769 elif fparentancestor == fparent2:
770 770 fparent2 = nullid
771 771
772 # is the file unmodified from the parent? report existing entry
773 if fparent2 == nullid and not flog.cmp(fparent1, text) and not meta:
774 return fparent1
775
772 # is the file changed?
773 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
776 774 changelist.append(fname)
777 775 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
778 776
777 # are just the flags changed during merge?
778 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
779 changelist.append(fname)
780
781 return fparent1
782
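# Outcome summary for the rewritten _filecommit above: a real change
# (second parent, differing content, or copy metadata) appends fname to
# changelist and writes a new filelog revision; a flags-only change during
# a merge (fparent1 != fparent2o with differing flags) appends fname to
# changelist but reuses fparent1; otherwise the existing fparent1 entry is
# returned untouched.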
779 783 def commit(self, files=None, text="", user=None, date=None,
780 784 match=None, force=False, force_editor=False,
781 785 p1=None, p2=None, extra={}, empty_ok=False):
782 786 wlock = lock = None
783 787 if extra.get("close"):
784 788 force = True
785 789 if files:
786 790 files = list(set(files))
787 791 try:
788 792 wlock = self.wlock()
789 793 lock = self.lock()
790 794
791 795 p1, p2 = self.dirstate.parents()
792 796
793 797 if (not force and p2 != nullid and
794 798 (match and (match.files() or match.anypats()))):
795 799 raise util.Abort(_('cannot partially commit a merge '
796 800 '(do not specify files or patterns)'))
797 801
798 802 if files:
799 803 modified, removed = [], []
800 804 for f in files:
801 805 s = self.dirstate[f]
802 806 if s in 'nma':
803 807 modified.append(f)
804 808 elif s == 'r':
805 809 removed.append(f)
806 810 else:
807 811 self.ui.warn(_("%s not tracked!\n") % f)
808 812 changes = [modified, [], removed, [], []]
809 813 else:
810 814 changes = self.status(match=match)
811 815
812 816 ms = merge_.mergestate(self)
813 817 for f in changes[0]:
814 818 if f in ms and ms[f] == 'u':
815 819 raise util.Abort(_("unresolved merge conflicts "
816 820 "(see hg resolve)"))
817 821 wctx = context.workingctx(self, (p1, p2), text, user, date,
818 822 extra, changes)
819 823 r = self._commitctx(wctx, force, force_editor, empty_ok, True)
820 824 ms.reset()
821 825 return r
822 826
823 827 finally:
824 828 release(lock, wlock)
825 829
826 830 def commitctx(self, ctx):
827 831 """Add a new revision to current repository.
828 832
829 833 Revision information is passed in the context.memctx argument.
830 834 commitctx() does not touch the working directory.
831 835 """
832 836 lock = self.lock()
833 837 try:
834 838 return self._commitctx(ctx, force=True, force_editor=False,
835 839 empty_ok=True, working=False)
836 840 finally:
837 841 lock.release()
838 842
    def _commitctx(self, ctx, force=False, force_editor=False, empty_ok=False,
                   working=True):
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = sorted(ctx.modified() + ctx.added())
            remove = ctx.removed()
            extra = ctx.extra().copy()
            branchname = extra['branch']
            user = ctx.user()
            text = ctx.description()

            p1, p2 = [p.node() for p in ctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if working:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                    if working:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if working:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            updated, added = [], []
            for f in sorted(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(remove) if f in m1 or f in m2]
            removed1 = []

            for f in removed:
                if f in m1:
                    del m1[f]
                    removed1.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed1))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append(_("HG: user: %s") % user)
                if p2 != nullid:
                    edittext.append(_("HG: branch merge"))
                if branchname:
                    edittext.append(_("HG: branch '%s'")
                                    % encoding.tolocal(branchname))
                edittext.extend([_("HG: added %s") % f for f in added])
                edittext.extend([_("HG: changed %s") % f for f in updated])
                edittext.extend([_("HG: removed %s") % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append(_("HG: no files changed"))
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and working:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, ctx.date(), extra)
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if working:
                self.dirstate.setparents(n)
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # updating the dirstate is optional
                            # so we don't wait on the lock
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except error.LockError:
                            pass
                    finally:
                        release(wlock)

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

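    # Illustrative sketch: callers unpack the seven status lists
    # positionally; lists not requested via the ignored/clean/unknown flags
    # come back empty. With the defaults this compares the working directory
    # against its first parent:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
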
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            release(wlock)

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None, closed=True):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=True):
        if branch is None:
            branch = self[None].branch()
        branches = self._branchheads()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

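    # Illustrative sketch of the sampling above: for a (top, bottom) pair,
    # between() walks the first-parent chain down from top and records the
    # nodes at exponentially growing distances 1, 2, 4, 8, ... so that
    #
    #   repo.between([(top, bottom)])  # -> [[n1, n2, n4, n8, ...]]
    #
    # gives the discovery code a logarithmic set of probe points, which
    # findcommonincoming() below then narrows by bisection.
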
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but have no children in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

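    # Illustrative usage sketch (the hg module import and the remote URL are
    # assumptions, not part of this file):
    #
    #   remote = hg.repository(ui, 'http://hg.example.com/repo')
    #   fetch = repo.findincoming(remote)
    #   # fetch now holds the roots of the changesets that remote has and
    #   # this repository lacks; an empty list means nothing is incoming.
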
    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but have no children in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

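    # Illustrative sketch: both strategies funnel through prepush() below,
    # so a caller only ever uses this unified entry point (the URL is
    # hypothetical):
    #
    #   remote = hg.repository(ui, 'ssh://hg.example.com//repo')
    #   repo.push(remote, force=False, revs=None)
    #
    # The capability check selects unbundle for servers the client cannot
    # lock, and falls back to addchangegroup otherwise.
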
    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

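        # Illustrative layout of the extranodes argument described in the
        # docstring above (the filename is hypothetical):
        #
        #   extranodes = {
        #       1: [(manifestnode, linknode)],           # key 1 -> manifest
        #       'foo/bar.txt': [(filenode, linknode)],   # filename -> filelog
        #   }
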
        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

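    # Illustrative sketch of the stream wire format consumed above
    # (reconstructed from the parsing code, not a protocol specification):
    #
    #   <resp>\n                         # 0 = ok, 1 = forbidden, 2 = lock failed
    #   <total_files> <total_bytes>\n
    #   <name>\0<size>\n<size raw bytes> # repeated total_files times
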
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

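# Illustrative sketch (the call shown is schematic; the journal/undo names
# are the conventional ones): a transaction is handed this closure so that
# completing it renames the journal without the closure holding a reference
# back to the repository object:
#
#   tr = transaction.transaction(ui.warn, sopener, sjoin("journal"),
#                                aftertrans([(sjoin("journal"),
#                                             sjoin("undo"))]))
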
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True