lookup: optimize '.'...
Matt Mackall
r6736:369ddc9c default
@@ -1,2147 +1,2142 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15
16 16 class localrepository(repo.repository):
17 17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 18 supported = ('revlogv1', 'store')
19 19
20 20 def __init__(self, parentui, path=None, create=0):
21 21 repo.repository.__init__(self)
22 22 self.root = os.path.realpath(path)
23 23 self.path = os.path.join(self.root, ".hg")
24 24 self.origroot = path
25 25 self.opener = util.opener(self.path)
26 26 self.wopener = util.opener(self.root)
27 27
28 28 if not os.path.isdir(self.path):
29 29 if create:
30 30 if not os.path.exists(path):
31 31 os.mkdir(path)
32 32 os.mkdir(self.path)
33 33 requirements = ["revlogv1"]
34 34 if parentui.configbool('format', 'usestore', True):
35 35 os.mkdir(os.path.join(self.path, "store"))
36 36 requirements.append("store")
37 37 # create an invalid changelog
38 38 self.opener("00changelog.i", "a").write(
39 39 '\0\0\0\2' # represents revlogv2
40 40 ' dummy changelog to prevent using the old repo layout'
41 41 )
42 42 reqfile = self.opener("requires", "w")
43 43 for r in requirements:
44 44 reqfile.write("%s\n" % r)
45 45 reqfile.close()
46 46 else:
47 47 raise repo.RepoError(_("repository %s not found") % path)
48 48 elif create:
49 49 raise repo.RepoError(_("repository %s already exists") % path)
50 50 else:
51 51 # find requirements
52 52 try:
53 53 requirements = self.opener("requires").read().splitlines()
54 54 except IOError, inst:
55 55 if inst.errno != errno.ENOENT:
56 56 raise
57 57 requirements = []
58 58 # check them
59 59 for r in requirements:
60 60 if r not in self.supported:
61 61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62 62
63 63 # setup store
64 64 if "store" in requirements:
65 65 self.encodefn = util.encodefilename
66 66 self.decodefn = util.decodefilename
67 67 self.spath = os.path.join(self.path, "store")
68 68 else:
69 69 self.encodefn = lambda x: x
70 70 self.decodefn = lambda x: x
71 71 self.spath = self.path
72 72
73 73 try:
74 74 # files in .hg/ will be created using this mode
75 75 mode = os.stat(self.spath).st_mode
76 76 # avoid some useless chmods
77 77 if (0777 & ~util._umask) == (0777 & mode):
78 78 mode = None
79 79 except OSError:
80 80 mode = None
81 81
82 82 self._createmode = mode
83 83 self.opener.createmode = mode
84 84 sopener = util.opener(self.spath)
85 85 sopener.createmode = mode
86 86 self.sopener = util.encodedopener(sopener, self.encodefn)
87 87
88 88 self.ui = ui.ui(parentui=parentui)
89 89 try:
90 90 self.ui.readconfig(self.join("hgrc"), self.root)
91 91 extensions.loadall(self.ui)
92 92 except IOError:
93 93 pass
94 94
95 95 self.tagscache = None
96 96 self._tagstypecache = None
97 97 self.branchcache = None
98 98 self._ubranchcache = None # UTF-8 version of branchcache
99 99 self._branchcachetip = None
100 100 self.nodetagscache = None
101 101 self.filterpats = {}
102 102 self._datafilters = {}
103 103 self._transref = self._lockref = self._wlockref = None
104 104
105 105 def __getattr__(self, name):
106 106 if name == 'changelog':
107 107 self.changelog = changelog.changelog(self.sopener)
108 108 self.sopener.defversion = self.changelog.version
109 109 return self.changelog
110 110 if name == 'manifest':
111 111 self.changelog
112 112 self.manifest = manifest.manifest(self.sopener)
113 113 return self.manifest
114 114 if name == 'dirstate':
115 115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 116 return self.dirstate
117 117 else:
118 118 raise AttributeError, name
119 119
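A note on the __getattr__ hunk above: Python only invokes __getattr__ when normal attribute lookup fails, so assigning the computed object onto self (as the changelog, manifest, and dirstate branches do) caches it, and every later access is a plain lookup that never re-enters __getattr__. A minimal sketch of the idiom outside Mercurial (the class and attribute names are invented):

    class Lazy(object):
        def __getattr__(self, name):
            if name == 'expensive':
                value = 42              # stand-in for a costly load
                self.expensive = value  # cache on the instance
                return value
            raise AttributeError(name)

    obj = Lazy()
    print(obj.expensive)  # computed and cached on first access
    print(obj.expensive)  # served by normal lookup from now on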
120 120 def url(self):
121 121 return 'file:' + self.root
122 122
123 123 def hook(self, name, throw=False, **args):
124 124 return hook.hook(self.ui, self, name, throw, **args)
125 125
126 126 tag_disallowed = ':\r\n'
127 127
128 128 def _tag(self, names, node, message, local, user, date, parent=None,
129 129 extra={}):
130 130 use_dirstate = parent is None
131 131
132 132 if isinstance(names, str):
133 133 allchars = names
134 134 names = (names,)
135 135 else:
136 136 allchars = ''.join(names)
137 137 for c in self.tag_disallowed:
138 138 if c in allchars:
139 139 raise util.Abort(_('%r cannot be used in a tag name') % c)
140 140
141 141 for name in names:
142 142 self.hook('pretag', throw=True, node=hex(node), tag=name,
143 143 local=local)
144 144
145 145 def writetags(fp, names, munge, prevtags):
146 146 fp.seek(0, 2)
147 147 if prevtags and prevtags[-1] != '\n':
148 148 fp.write('\n')
149 149 for name in names:
150 150 m = munge and munge(name) or name
151 151 if self._tagstypecache and name in self._tagstypecache:
152 152 old = self.tagscache.get(name, nullid)
153 153 fp.write('%s %s\n' % (hex(old), m))
154 154 fp.write('%s %s\n' % (hex(node), m))
155 155 fp.close()
156 156
157 157 prevtags = ''
158 158 if local:
159 159 try:
160 160 fp = self.opener('localtags', 'r+')
161 161 except IOError, err:
162 162 fp = self.opener('localtags', 'a')
163 163 else:
164 164 prevtags = fp.read()
165 165
166 166 # local tags are stored in the current charset
167 167 writetags(fp, names, None, prevtags)
168 168 for name in names:
169 169 self.hook('tag', node=hex(node), tag=name, local=local)
170 170 return
171 171
172 172 if use_dirstate:
173 173 try:
174 174 fp = self.wfile('.hgtags', 'rb+')
175 175 except IOError, err:
176 176 fp = self.wfile('.hgtags', 'ab')
177 177 else:
178 178 prevtags = fp.read()
179 179 else:
180 180 try:
181 181 prevtags = self.filectx('.hgtags', parent).data()
182 182 except revlog.LookupError:
183 183 pass
184 184 fp = self.wfile('.hgtags', 'wb')
185 185 if prevtags:
186 186 fp.write(prevtags)
187 187
188 188 # committed tags are stored in UTF-8
189 189 writetags(fp, names, util.fromlocal, prevtags)
190 190
191 191 if use_dirstate and '.hgtags' not in self.dirstate:
192 192 self.add(['.hgtags'])
193 193
194 194 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
195 195 extra=extra)
196 196
197 197 for name in names:
198 198 self.hook('tag', node=hex(node), tag=name, local=local)
199 199
200 200 return tagnode
201 201
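For reference, the tag files handled by writetags above are plain text, one '<40-char hex node> <tag name>' pair per line, and later lines win when the file is read back; that is why a reassigned tag gets its previous node re-recorded first. A hedged sketch of reading that format (parse_tags is an invented helper):

    def parse_tags(lines):
        tags = {}
        for line in lines:
            if not line.strip():
                continue
            node, name = line.split(' ', 1)
            tags[name.strip()] = node  # later entries override earlier ones
        return tags

    sample = ['aa' * 20 + ' v1.0', 'bb' * 20 + ' v1.0']
    print(parse_tags(sample))  # {'v1.0': 'bbbb...'}: the later line wins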
202 202 def tag(self, names, node, message, local, user, date):
203 203 '''tag a revision with one or more symbolic names.
204 204
205 205 names is a list of strings or, when adding a single tag, names may be a
206 206 string.
207 207
208 208 if local is True, the tags are stored in a per-repository file.
209 209 otherwise, they are stored in the .hgtags file, and a new
210 210 changeset is committed with the change.
211 211
212 212 keyword arguments:
213 213
214 214 local: whether to store tags in non-version-controlled file
215 215 (default False)
216 216
217 217 message: commit message to use if committing
218 218
219 219 user: name of user to use if committing
220 220
221 221 date: date tuple to use if committing'''
222 222
223 223 for x in self.status()[:5]:
224 224 if '.hgtags' in x:
225 225 raise util.Abort(_('working copy of .hgtags is changed '
226 226 '(please commit .hgtags manually)'))
227 227
228 228 self._tag(names, node, message, local, user, date)
229 229
230 230 def tags(self):
231 231 '''return a mapping of tag to node'''
232 232 if self.tagscache:
233 233 return self.tagscache
234 234
235 235 globaltags = {}
236 236 tagtypes = {}
237 237
238 238 def readtags(lines, fn, tagtype):
239 239 filetags = {}
240 240 count = 0
241 241
242 242 def warn(msg):
243 243 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
244 244
245 245 for l in lines:
246 246 count += 1
247 247 if not l:
248 248 continue
249 249 s = l.split(" ", 1)
250 250 if len(s) != 2:
251 251 warn(_("cannot parse entry"))
252 252 continue
253 253 node, key = s
254 254 key = util.tolocal(key.strip()) # stored in UTF-8
255 255 try:
256 256 bin_n = bin(node)
257 257 except TypeError:
258 258 warn(_("node '%s' is not well formed") % node)
259 259 continue
260 260 if bin_n not in self.changelog.nodemap:
261 261 warn(_("tag '%s' refers to unknown node") % key)
262 262 continue
263 263
264 264 h = []
265 265 if key in filetags:
266 266 n, h = filetags[key]
267 267 h.append(n)
268 268 filetags[key] = (bin_n, h)
269 269
270 270 for k, nh in filetags.items():
271 271 if k not in globaltags:
272 272 globaltags[k] = nh
273 273 tagtypes[k] = tagtype
274 274 continue
275 275
276 276 # we prefer the global tag if:
277 277 # it supersedes us OR
278 278 # mutual supersedes and it has a higher rank
279 279 # otherwise we win because we're tip-most
280 280 an, ah = nh
281 281 bn, bh = globaltags[k]
282 282 if (bn != an and an in bh and
283 283 (bn not in ah or len(bh) > len(ah))):
284 284 an = bn
285 285 ah.extend([n for n in bh if n not in ah])
286 286 globaltags[k] = an, ah
287 287 tagtypes[k] = tagtype
288 288
289 289 # read the tags file from each head, ending with the tip
290 290 f = None
291 291 for rev, node, fnode in self._hgtagsnodes():
292 292 f = (f and f.filectx(fnode) or
293 293 self.filectx('.hgtags', fileid=fnode))
294 294 readtags(f.data().splitlines(), f, "global")
295 295
296 296 try:
297 297 data = util.fromlocal(self.opener("localtags").read())
298 298 # localtags are stored in the local character set
299 299 # while the internal tag table is stored in UTF-8
300 300 readtags(data.splitlines(), "localtags", "local")
301 301 except IOError:
302 302 pass
303 303
304 304 self.tagscache = {}
305 305 self._tagstypecache = {}
306 306 for k,nh in globaltags.items():
307 307 n = nh[0]
308 308 if n != nullid:
309 309 self.tagscache[k] = n
310 310 self._tagstypecache[k] = tagtypes[k]
311 311 self.tagscache['tip'] = self.changelog.tip()
312 312 return self.tagscache
313 313
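Inside readtags above, each tag entry carries the list of nodes it previously pointed at (its history), and one predicate decides whether the entry already in the global table beats the one just read. The same predicate on plain strings, to make the two comment clauses concrete (prefer_global and the node names are invented):

    def prefer_global(an, ah, bn, bh):
        # an/ah: node and history just read (wins ties, being tip-most)
        # bn/bh: node and history already in the global table
        return (bn != an and an in bh and
                (bn not in ah or len(bh) > len(ah)))

    print(prefer_global('n2', [], 'n3', ['n2']))  # True: n3 supersedes n2
    print(prefer_global('n3', ['n2'], 'n2', []))  # False: tip-most n3 wins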
314 314 def tagtype(self, tagname):
315 315 '''
316 316 return the type of the given tag. result can be:
317 317
318 318 'local' : a local tag
319 319 'global' : a global tag
320 320 None : tag does not exist
321 321 '''
322 322
323 323 self.tags()
324 324
325 325 return self._tagstypecache.get(tagname)
326 326
327 327 def _hgtagsnodes(self):
328 328 heads = self.heads()
329 329 heads.reverse()
330 330 last = {}
331 331 ret = []
332 332 for node in heads:
333 333 c = self.changectx(node)
334 334 rev = c.rev()
335 335 try:
336 336 fnode = c.filenode('.hgtags')
337 337 except revlog.LookupError:
338 338 continue
339 339 ret.append((rev, node, fnode))
340 340 if fnode in last:
341 341 ret[last[fnode]] = None
342 342 last[fnode] = len(ret) - 1
343 343 return [item for item in ret if item]
344 344
345 345 def tagslist(self):
346 346 '''return a list of tags ordered by revision'''
347 347 l = []
348 348 for t, n in self.tags().items():
349 349 try:
350 350 r = self.changelog.rev(n)
351 351 except:
352 352 r = -2 # sort to the beginning of the list if unknown
353 353 l.append((r, t, n))
354 354 l.sort()
355 355 return [(t, n) for r, t, n in l]
356 356
357 357 def nodetags(self, node):
358 358 '''return the tags associated with a node'''
359 359 if not self.nodetagscache:
360 360 self.nodetagscache = {}
361 361 for t, n in self.tags().items():
362 362 self.nodetagscache.setdefault(n, []).append(t)
363 363 return self.nodetagscache.get(node, [])
364 364
365 365 def _branchtags(self, partial, lrev):
366 366 tiprev = self.changelog.count() - 1
367 367 if lrev != tiprev:
368 368 self._updatebranchcache(partial, lrev+1, tiprev+1)
369 369 self._writebranchcache(partial, self.changelog.tip(), tiprev)
370 370
371 371 return partial
372 372
373 373 def branchtags(self):
374 374 tip = self.changelog.tip()
375 375 if self.branchcache is not None and self._branchcachetip == tip:
376 376 return self.branchcache
377 377
378 378 oldtip = self._branchcachetip
379 379 self._branchcachetip = tip
380 380 if self.branchcache is None:
381 381 self.branchcache = {} # avoid recursion in changectx
382 382 else:
383 383 self.branchcache.clear() # keep using the same dict
384 384 if oldtip is None or oldtip not in self.changelog.nodemap:
385 385 partial, last, lrev = self._readbranchcache()
386 386 else:
387 387 lrev = self.changelog.rev(oldtip)
388 388 partial = self._ubranchcache
389 389
390 390 self._branchtags(partial, lrev)
391 391
392 392 # the branch cache is stored on disk as UTF-8, but in the local
393 393 # charset internally
394 394 for k, v in partial.items():
395 395 self.branchcache[util.tolocal(k)] = v
396 396 self._ubranchcache = partial
397 397 return self.branchcache
398 398
399 399 def _readbranchcache(self):
400 400 partial = {}
401 401 try:
402 402 f = self.opener("branch.cache")
403 403 lines = f.read().split('\n')
404 404 f.close()
405 405 except (IOError, OSError):
406 406 return {}, nullid, nullrev
407 407
408 408 try:
409 409 last, lrev = lines.pop(0).split(" ", 1)
410 410 last, lrev = bin(last), int(lrev)
411 411 if not (lrev < self.changelog.count() and
412 412 self.changelog.node(lrev) == last): # sanity check
413 413 # invalidate the cache
414 414 raise ValueError('invalidating branch cache (tip differs)')
415 415 for l in lines:
416 416 if not l: continue
417 417 node, label = l.split(" ", 1)
418 418 partial[label.strip()] = bin(node)
419 419 except (KeyboardInterrupt, util.SignalInterrupt):
420 420 raise
421 421 except Exception, inst:
422 422 if self.ui.debugflag:
423 423 self.ui.warn(str(inst), '\n')
424 424 partial, last, lrev = {}, nullid, nullrev
425 425 return partial, last, lrev
426 426
427 427 def _writebranchcache(self, branches, tip, tiprev):
428 428 try:
429 429 f = self.opener("branch.cache", "w", atomictemp=True)
430 430 f.write("%s %s\n" % (hex(tip), tiprev))
431 431 for label, node in branches.iteritems():
432 432 f.write("%s %s\n" % (hex(node), label))
433 433 f.rename()
434 434 except (IOError, OSError):
435 435 pass
436 436
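The branch.cache layout used by the two methods above: the first line is '<tip hex> <tip rev>' and acts as the validity check, and each following line is '<node hex> <branch name>'. A round-trip sketch with fake 40-character hex strings (both helper names are invented):

    def write_cache(tip, tiprev, branches):
        lines = ['%s %s' % (tip, tiprev)]
        lines += ['%s %s' % (node, label) for label, node in branches.items()]
        return '\n'.join(lines) + '\n'

    def read_cache(text):
        lines = text.split('\n')
        tip, tiprev = lines.pop(0).split(' ', 1)
        branches = {}
        for l in lines:
            if not l:
                continue
            node, label = l.split(' ', 1)
            branches[label.strip()] = node
        return tip, int(tiprev), branches

    text = write_cache('aa' * 20, 5, {'default': 'bb' * 20})
    print(read_cache(text))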
437 437 def _updatebranchcache(self, partial, start, end):
438 438 for r in xrange(start, end):
439 439 c = self.changectx(r)
440 440 b = c.branch()
441 441 partial[b] = c.node()
442 442
443 443 def lookup(self, key):
444 444 if key == '.':
445 key, second = self.dirstate.parents()
446 if key == nullid:
447 raise repo.RepoError(_("no revision checked out"))
448 if second != nullid:
449 self.ui.warn(_("warning: working directory has two parents, "
450 "tag '.' uses the first\n"))
445 return self.dirstate.parents()[0]
451 446 elif key == 'null':
452 447 return nullid
453 448 n = self.changelog._match(key)
454 449 if n:
455 450 return n
456 451 if key in self.tags():
457 452 return self.tags()[key]
458 453 if key in self.branchtags():
459 454 return self.branchtags()[key]
460 455 n = self.changelog._partialmatch(key)
461 456 if n:
462 457 return n
463 458 try:
464 459 if len(key) == 20:
465 460 key = hex(key)
466 461 except:
467 462 pass
468 463 raise repo.RepoError(_("unknown revision '%s'") % key)
469 464
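This hunk is the optimization the commit message names: lookup('.') now returns the first dirstate parent directly, dropping the nullid check and the two-parent warning from this path. It helps to read the method as a first-match chain of namespaces: '.', then 'null', then an exact rev/hex match, then tags, then branch names, then a unique hex prefix. A toy version of that chain (names invented):

    def lookup(key, namespaces):
        for resolve in namespaces:
            n = resolve(key)
            if n is not None:
                return n  # the first namespace that matches wins
        raise KeyError("unknown revision '%s'" % key)

    tags = {'tip': 'n9', 'v1.0': 'n4'}
    branches = {'default': 'n9'}
    print(lookup('v1.0', [tags.get, branches.get]))  # 'n4': tags tried first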
470 465 def local(self):
471 466 return True
472 467
473 468 def join(self, f):
474 469 return os.path.join(self.path, f)
475 470
476 471 def sjoin(self, f):
477 472 f = self.encodefn(f)
478 473 return os.path.join(self.spath, f)
479 474
480 475 def wjoin(self, f):
481 476 return os.path.join(self.root, f)
482 477
483 478 def rjoin(self, f):
484 479 return os.path.join(self.root, util.pconvert(f))
485 480
486 481 def file(self, f):
487 482 if f[0] == '/':
488 483 f = f[1:]
489 484 return filelog.filelog(self.sopener, f)
490 485
491 486 def changectx(self, changeid=None):
492 487 return context.changectx(self, changeid)
493 488
494 489 def workingctx(self):
495 490 return context.workingctx(self)
496 491
497 492 def parents(self, changeid=None):
498 493 '''
499 494 get list of changectxs for parents of changeid or working directory
500 495 '''
501 496 if changeid is None:
502 497 pl = self.dirstate.parents()
503 498 else:
504 499 n = self.changelog.lookup(changeid)
505 500 pl = self.changelog.parents(n)
506 501 if pl[1] == nullid:
507 502 return [self.changectx(pl[0])]
508 503 return [self.changectx(pl[0]), self.changectx(pl[1])]
509 504
510 505 def filectx(self, path, changeid=None, fileid=None):
511 506 """changeid can be a changeset revision, node, or tag.
512 507 fileid can be a file revision or node."""
513 508 return context.filectx(self, path, changeid, fileid)
514 509
515 510 def getcwd(self):
516 511 return self.dirstate.getcwd()
517 512
518 513 def pathto(self, f, cwd=None):
519 514 return self.dirstate.pathto(f, cwd)
520 515
521 516 def wfile(self, f, mode='r'):
522 517 return self.wopener(f, mode)
523 518
524 519 def _link(self, f):
525 520 return os.path.islink(self.wjoin(f))
526 521
527 522 def _filter(self, filter, filename, data):
528 523 if filter not in self.filterpats:
529 524 l = []
530 525 for pat, cmd in self.ui.configitems(filter):
531 526 mf = util.matcher(self.root, "", [pat], [], [])[1]
532 527 fn = None
533 528 params = cmd
534 529 for name, filterfn in self._datafilters.iteritems():
535 530 if cmd.startswith(name):
536 531 fn = filterfn
537 532 params = cmd[len(name):].lstrip()
538 533 break
539 534 if not fn:
540 535 fn = lambda s, c, **kwargs: util.filter(s, c)
541 536 # Wrap old filters not supporting keyword arguments
542 537 if not inspect.getargspec(fn)[2]:
543 538 oldfn = fn
544 539 fn = lambda s, c, **kwargs: oldfn(s, c)
545 540 l.append((mf, fn, params))
546 541 self.filterpats[filter] = l
547 542
548 543 for mf, fn, cmd in self.filterpats[filter]:
549 544 if mf(filename):
550 545 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
551 546 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
552 547 break
553 548
554 549 return data
555 550
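_filter above compiles the [encode]/[decode] hgrc sections into (matcher, function, params) triples and runs the first triple whose pattern matches the filename. A stripped-down sketch of that dispatch, with the config parsing and pattern matching replaced by plain callables (everything here is illustrative):

    filters = [
        (lambda fn: fn.endswith('.txt'),                # matcher
         lambda data, cmd: data.replace('\r\n', '\n'),  # filter function
         'dos2unix'),                                   # params, for logging
    ]

    def run_filters(filename, data):
        for match, fn, cmd in filters:
            if match(filename):
                return fn(data, cmd)  # first matching filter wins
        return data

    print(repr(run_filters('a.txt', 'x\r\ny\r\n')))  # 'x\ny\n'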
556 551 def adddatafilter(self, name, filter):
557 552 self._datafilters[name] = filter
558 553
559 554 def wread(self, filename):
560 555 if self._link(filename):
561 556 data = os.readlink(self.wjoin(filename))
562 557 else:
563 558 data = self.wopener(filename, 'r').read()
564 559 return self._filter("encode", filename, data)
565 560
566 561 def wwrite(self, filename, data, flags):
567 562 data = self._filter("decode", filename, data)
568 563 try:
569 564 os.unlink(self.wjoin(filename))
570 565 except OSError:
571 566 pass
572 567 self.wopener(filename, 'w').write(data)
573 568 util.set_flags(self.wjoin(filename), flags)
574 569
575 570 def wwritedata(self, filename, data):
576 571 return self._filter("decode", filename, data)
577 572
578 573 def transaction(self):
579 574 if self._transref and self._transref():
580 575 return self._transref().nest()
581 576
582 577 # abort here if the journal already exists
583 578 if os.path.exists(self.sjoin("journal")):
584 579 raise repo.RepoError(_("journal already exists - run hg recover"))
585 580
586 581 # save dirstate for rollback
587 582 try:
588 583 ds = self.opener("dirstate").read()
589 584 except IOError:
590 585 ds = ""
591 586 self.opener("journal.dirstate", "w").write(ds)
592 587 self.opener("journal.branch", "w").write(self.dirstate.branch())
593 588
594 589 renames = [(self.sjoin("journal"), self.sjoin("undo")),
595 590 (self.join("journal.dirstate"), self.join("undo.dirstate")),
596 591 (self.join("journal.branch"), self.join("undo.branch"))]
597 592 tr = transaction.transaction(self.ui.warn, self.sopener,
598 593 self.sjoin("journal"),
599 594 aftertrans(renames),
600 595 self._createmode)
601 596 self._transref = weakref.ref(tr)
602 597 return tr
603 598
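The rename list built above is how rollback data outlives a successful transaction: writes are journalled while the transaction is open, and on a clean close the journal files are renamed to their undo counterparts (aftertrans, used here and defined elsewhere in this module, appears to return a callback performing those renames), which is what hg rollback later restores from. A schematic of the pattern, with print standing in for util.rename:

    renames = [('journal', 'undo'),
               ('journal.dirstate', 'undo.dirstate'),
               ('journal.branch', 'undo.branch')]

    def aftertrans(pairs):
        def run():
            for src, dst in pairs:
                print('rename %s -> %s' % (src, dst))
        return run

    aftertrans(renames)()  # fires when the transaction closes cleanly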
604 599 def recover(self):
605 600 l = self.lock()
606 601 try:
607 602 if os.path.exists(self.sjoin("journal")):
608 603 self.ui.status(_("rolling back interrupted transaction\n"))
609 604 transaction.rollback(self.sopener, self.sjoin("journal"))
610 605 self.invalidate()
611 606 return True
612 607 else:
613 608 self.ui.warn(_("no interrupted transaction available\n"))
614 609 return False
615 610 finally:
616 611 del l
617 612
618 613 def rollback(self):
619 614 wlock = lock = None
620 615 try:
621 616 wlock = self.wlock()
622 617 lock = self.lock()
623 618 if os.path.exists(self.sjoin("undo")):
624 619 self.ui.status(_("rolling back last transaction\n"))
625 620 transaction.rollback(self.sopener, self.sjoin("undo"))
626 621 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
627 622 try:
628 623 branch = self.opener("undo.branch").read()
629 624 self.dirstate.setbranch(branch)
630 625 except IOError:
631 626 self.ui.warn(_("Named branch could not be reset, "
632 627 "current branch still is: %s\n")
633 628 % util.tolocal(self.dirstate.branch()))
634 629 self.invalidate()
635 630 self.dirstate.invalidate()
636 631 else:
637 632 self.ui.warn(_("no rollback information available\n"))
638 633 finally:
639 634 del lock, wlock
640 635
641 636 def invalidate(self):
642 637 for a in "changelog manifest".split():
643 638 if a in self.__dict__:
644 639 delattr(self, a)
645 640 self.tagscache = None
646 641 self._tagstypecache = None
647 642 self.nodetagscache = None
648 643 self.branchcache = None
649 644 self._ubranchcache = None
650 645 self._branchcachetip = None
651 646
652 647 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
653 648 try:
654 649 l = lock.lock(lockname, 0, releasefn, desc=desc)
655 650 except lock.LockHeld, inst:
656 651 if not wait:
657 652 raise
658 653 self.ui.warn(_("waiting for lock on %s held by %r\n") %
659 654 (desc, inst.locker))
660 655 # default to 600 seconds timeout
661 656 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
662 657 releasefn, desc=desc)
663 658 if acquirefn:
664 659 acquirefn()
665 660 return l
666 661
667 662 def lock(self, wait=True):
668 663 if self._lockref and self._lockref():
669 664 return self._lockref()
670 665
671 666 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
672 667 _('repository %s') % self.origroot)
673 668 self._lockref = weakref.ref(l)
674 669 return l
675 670
676 671 def wlock(self, wait=True):
677 672 if self._wlockref and self._wlockref():
678 673 return self._wlockref()
679 674
680 675 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
681 676 self.dirstate.invalidate, _('working directory of %s') %
682 677 self.origroot)
683 678 self._wlockref = weakref.ref(l)
684 679 return l
685 680
686 681 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
687 682 """
688 683 commit an individual file as part of a larger transaction
689 684 """
690 685
691 686 fn = fctx.path()
692 687 t = fctx.data()
693 688 fl = self.file(fn)
694 689 fp1 = manifest1.get(fn, nullid)
695 690 fp2 = manifest2.get(fn, nullid)
696 691
697 692 meta = {}
698 693 cp = fctx.renamed()
699 694 if cp and cp[0] != fn:
700 695 cp = cp[0]
701 696 # Mark the new revision of this file as a copy of another
702 697 # file. This copy data will effectively act as a parent
703 698 # of this new revision. If this is a merge, the first
704 699 # parent will be the nullid (meaning "look up the copy data")
705 700 # and the second one will be the other parent. For example:
706 701 #
707 702 # 0 --- 1 --- 3 rev1 changes file foo
708 703 # \ / rev2 renames foo to bar and changes it
709 704 # \- 2 -/ rev3 should have bar with all changes and
710 705 # should record that bar descends from
711 706 # bar in rev2 and foo in rev1
712 707 #
713 708 # this allows this merge to succeed:
714 709 #
715 710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
716 711 # \ / merging rev3 and rev4 should use bar@rev2
717 712 # \- 2 --- 4 as the merge base
718 713 #
719 714 meta["copy"] = cp
720 715 if not manifest2: # not a branch merge
721 716 meta["copyrev"] = hex(manifest1[cp])
722 717 fp2 = nullid
723 718 elif fp2 != nullid: # copied on remote side
724 719 meta["copyrev"] = hex(manifest1[cp])
725 720 elif fp1 != nullid: # copied on local side, reversed
726 721 meta["copyrev"] = hex(manifest2[cp])
727 722 fp2 = fp1
728 723 elif cp in manifest2: # directory rename on local side
729 724 meta["copyrev"] = hex(manifest2[cp])
730 725 else: # directory rename on remote side
731 726 meta["copyrev"] = hex(manifest1[cp])
732 727 self.ui.debug(_(" %s: copy %s:%s\n") %
733 728 (fn, cp, meta["copyrev"]))
734 729 fp1 = nullid
735 730 elif fp2 != nullid:
736 731 # is one parent an ancestor of the other?
737 732 fpa = fl.ancestor(fp1, fp2)
738 733 if fpa == fp1:
739 734 fp1, fp2 = fp2, nullid
740 735 elif fpa == fp2:
741 736 fp2 = nullid
742 737
743 738 # is the file unmodified from the parent? report existing entry
744 739 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
745 740 return fp1
746 741
747 742 changelist.append(fn)
748 743 return fl.add(t, meta, tr, linkrev, fp1, fp2)
749 744
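Two details of filecommit above are easy to miss: when both file parents are set but one is an ancestor of the other, the redundant parent is dropped so the filelog records no spurious merge; and when the text equals the surviving parent with no copy metadata, the existing entry is reused rather than adding a new revision. The parent-collapsing step on toy values (collapse and the node names are invented; None stands in for nullid):

    def collapse(fp1, fp2, ancestor):
        # ancestor(a, b): common ancestor of two file nodes
        fpa = ancestor(fp1, fp2)
        if fpa == fp1:
            return fp2, None  # fp1 is an ancestor of fp2: drop it
        if fpa == fp2:
            return fp1, None  # fp2 is redundant
        return fp1, fp2       # a genuine merge of two histories

    anc = lambda a, b: 'p1'   # pretend p1 is the common ancestor
    print(collapse('p1', 'p2', anc))  # ('p2', None)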
750 745 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
751 746 if p1 is None:
752 747 p1, p2 = self.dirstate.parents()
753 748 return self.commit(files=files, text=text, user=user, date=date,
754 749 p1=p1, p2=p2, extra=extra, empty_ok=True)
755 750
756 751 def commit(self, files=None, text="", user=None, date=None,
757 752 match=None, force=False, force_editor=False,
758 753 p1=None, p2=None, extra={}, empty_ok=False):
759 754 wlock = lock = None
760 755 if files:
761 756 files = util.unique(files)
762 757 try:
763 758 wlock = self.wlock()
764 759 lock = self.lock()
765 760 use_dirstate = (p1 is None) # not rawcommit
766 761
767 762 if use_dirstate:
768 763 p1, p2 = self.dirstate.parents()
769 764 update_dirstate = True
770 765
771 766 if (not force and p2 != nullid and
772 767 (match and (match.files() or match.anypats()))):
773 768 raise util.Abort(_('cannot partially commit a merge '
774 769 '(do not specify files or patterns)'))
775 770
776 771 if files:
777 772 modified, removed = [], []
778 773 for f in files:
779 774 s = self.dirstate[f]
780 775 if s in 'nma':
781 776 modified.append(f)
782 777 elif s == 'r':
783 778 removed.append(f)
784 779 else:
785 780 self.ui.warn(_("%s not tracked!\n") % f)
786 781 changes = [modified, [], removed, [], []]
787 782 else:
788 783 changes = self.status(match=match)
789 784 else:
790 785 p1, p2 = p1, p2 or nullid
791 786 update_dirstate = (self.dirstate.parents()[0] == p1)
792 787 changes = [files, [], [], [], []]
793 788
794 789 wctx = context.workingctx(self, (p1, p2), text, user, date,
795 790 extra, changes)
796 791 return self._commitctx(wctx, force, force_editor, empty_ok,
797 792 use_dirstate, update_dirstate)
798 793 finally:
799 794 del lock, wlock
800 795
801 796 def commitctx(self, ctx):
802 797 wlock = lock = None
803 798 try:
804 799 wlock = self.wlock()
805 800 lock = self.lock()
806 801 return self._commitctx(ctx, force=True, force_editor=False,
807 802 empty_ok=True, use_dirstate=False,
808 803 update_dirstate=False)
809 804 finally:
810 805 del lock, wlock
811 806
812 807 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
813 808 use_dirstate=True, update_dirstate=True):
814 809 tr = None
815 810 valid = 0 # don't save the dirstate if this isn't set
816 811 try:
817 812 commit = wctx.modified() + wctx.added()
818 813 remove = wctx.removed()
819 814 extra = wctx.extra().copy()
820 815 branchname = extra['branch']
821 816 user = wctx.user()
822 817 text = wctx.description()
823 818
824 819 p1, p2 = [p.node() for p in wctx.parents()]
825 820 c1 = self.changelog.read(p1)
826 821 c2 = self.changelog.read(p2)
827 822 m1 = self.manifest.read(c1[0]).copy()
828 823 m2 = self.manifest.read(c2[0])
829 824
830 825 if use_dirstate:
831 826 oldname = c1[5].get("branch") # stored in UTF-8
832 827 if (not commit and not remove and not force and p2 == nullid
833 828 and branchname == oldname):
834 829 self.ui.status(_("nothing changed\n"))
835 830 return None
836 831
837 832 xp1 = hex(p1)
838 833 if p2 == nullid: xp2 = ''
839 834 else: xp2 = hex(p2)
840 835
841 836 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
842 837
843 838 tr = self.transaction()
844 839 trp = weakref.proxy(tr)
845 840
846 841 # check in files
847 842 new = {}
848 843 changed = []
849 844 linkrev = self.changelog.count()
850 845 commit.sort()
851 846 for f in commit:
852 847 self.ui.note(f + "\n")
853 848 try:
854 849 fctx = wctx.filectx(f)
855 850 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
856 851 new_exec = fctx.isexec()
857 852 new_link = fctx.islink()
858 853 if ((not changed or changed[-1] != f) and
859 854 m2.get(f) != new[f]):
860 855 # mention the file in the changelog if some
861 856 # flag changed, even if there was no content
862 857 # change.
863 858 old_exec = m1.execf(f)
864 859 old_link = m1.linkf(f)
865 860 if old_exec != new_exec or old_link != new_link:
866 861 changed.append(f)
867 862 m1.set(f, new_exec, new_link)
868 863 if use_dirstate:
869 864 self.dirstate.normal(f)
870 865
871 866 except (OSError, IOError):
872 867 if use_dirstate:
873 868 self.ui.warn(_("trouble committing %s!\n") % f)
874 869 raise
875 870 else:
876 871 remove.append(f)
877 872
878 873 # update manifest
879 874 m1.update(new)
880 875 remove.sort()
881 876 removed = []
882 877
883 878 for f in remove:
884 879 if f in m1:
885 880 del m1[f]
886 881 removed.append(f)
887 882 elif f in m2:
888 883 removed.append(f)
889 884 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
890 885 (new, removed))
891 886
892 887 # add changeset
893 888 if (not empty_ok and not text) or force_editor:
894 889 edittext = []
895 890 if text:
896 891 edittext.append(text)
897 892 edittext.append("")
898 893 edittext.append(_("HG: Enter commit message."
899 894 " Lines beginning with 'HG:' are removed."))
900 895 edittext.append("HG: --")
901 896 edittext.append("HG: user: %s" % user)
902 897 if p2 != nullid:
903 898 edittext.append("HG: branch merge")
904 899 if branchname:
905 900 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
906 901 edittext.extend(["HG: changed %s" % f for f in changed])
907 902 edittext.extend(["HG: removed %s" % f for f in removed])
908 903 if not changed and not remove:
909 904 edittext.append("HG: no files changed")
910 905 edittext.append("")
911 906 # run editor in the repository root
912 907 olddir = os.getcwd()
913 908 os.chdir(self.root)
914 909 text = self.ui.edit("\n".join(edittext), user)
915 910 os.chdir(olddir)
916 911
917 912 lines = [line.rstrip() for line in text.rstrip().splitlines()]
918 913 while lines and not lines[0]:
919 914 del lines[0]
920 915 if not lines and use_dirstate:
921 916 raise util.Abort(_("empty commit message"))
922 917 text = '\n'.join(lines)
923 918
924 919 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
925 920 user, wctx.date(), extra)
926 921 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
927 922 parent2=xp2)
928 923 tr.close()
929 924
930 925 if self.branchcache:
931 926 self.branchtags()
932 927
933 928 if use_dirstate or update_dirstate:
934 929 self.dirstate.setparents(n)
935 930 if use_dirstate:
936 931 for f in removed:
937 932 self.dirstate.forget(f)
938 933 valid = 1 # our dirstate updates are complete
939 934
940 935 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
941 936 return n
942 937 finally:
943 938 if not valid: # don't save our updated dirstate
944 939 self.dirstate.invalidate()
945 940 del tr
946 941
947 942 def walk(self, match, node=None):
948 943 '''
949 944 walk recursively through the directory tree or a given
950 945 changeset, finding all files matched by the match
951 946 function
952 947 '''
953 948
954 949 if node:
955 950 fdict = dict.fromkeys(match.files())
956 951 # for dirstate.walk, files=['.'] means "walk the whole tree".
957 952 # follow that here, too
958 953 fdict.pop('.', None)
959 954 mdict = self.manifest.read(self.changelog.read(node)[0])
960 955 mfiles = mdict.keys()
961 956 mfiles.sort()
962 957 for fn in mfiles:
963 958 for ffn in fdict:
964 959 # match if the file is the exact name or a directory
965 960 if ffn == fn or fn.startswith("%s/" % ffn):
966 961 del fdict[ffn]
967 962 break
968 963 if match(fn):
969 964 yield fn
970 965 ffiles = fdict.keys()
971 966 ffiles.sort()
972 967 for fn in ffiles:
973 968 if match.bad(fn, 'No such file in rev ' + short(node)) \
974 969 and match(fn):
975 970 yield fn
976 971 else:
977 972 for fn in self.dirstate.walk(match):
978 973 yield fn
979 974
980 975 def status(self, node1=None, node2=None, match=None,
981 976 list_ignored=False, list_clean=False, list_unknown=True):
982 977 """return status of files between two nodes or node and working directory
983 978
984 979 If node1 is None, use the first dirstate parent instead.
985 980 If node2 is None, compare node1 with working directory.
986 981 """
987 982
988 983 def fcmp(fn, getnode):
989 984 t1 = self.wread(fn)
990 985 return self.file(fn).cmp(getnode(fn), t1)
991 986
992 987 def mfmatches(node):
993 988 change = self.changelog.read(node)
994 989 mf = self.manifest.read(change[0]).copy()
995 990 for fn in mf.keys():
996 991 if not match(fn):
997 992 del mf[fn]
998 993 return mf
999 994
1000 995 if not match:
1001 996 match = match_.always(self.root, self.getcwd())
1002 997
1003 998 modified, added, removed, deleted, unknown = [], [], [], [], []
1004 999 ignored, clean = [], []
1005 1000
1006 1001 compareworking = False
1007 1002 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
1008 1003 compareworking = True
1009 1004
1010 1005 if not compareworking:
1011 1006 # read the manifest from node1 before the manifest from node2,
1012 1007 # so that we'll hit the manifest cache if we're going through
1013 1008 # all the revisions in parent->child order.
1014 1009 mf1 = mfmatches(node1)
1015 1010
1016 1011 # are we comparing the working directory?
1017 1012 if not node2:
1018 1013 (lookup, modified, added, removed, deleted, unknown,
1019 1014 ignored, clean) = self.dirstate.status(match, list_ignored,
1020 1015 list_clean, list_unknown)
1021 1016 # are we comparing working dir against its parent?
1022 1017 if compareworking:
1023 1018 if lookup:
1024 1019 fixup = []
1025 1020 # do a full compare of any files that might have changed
1026 1021 ctx = self.changectx()
1027 1022 mexec = lambda f: 'x' in ctx.fileflags(f)
1028 1023 mlink = lambda f: 'l' in ctx.fileflags(f)
1029 1024 is_exec = util.execfunc(self.root, mexec)
1030 1025 is_link = util.linkfunc(self.root, mlink)
1031 1026 def flags(f):
1032 1027 return is_link(f) and 'l' or is_exec(f) and 'x' or ''
1033 1028 for f in lookup:
1034 1029 if (f not in ctx or flags(f) != ctx.fileflags(f)
1035 1030 or ctx[f].cmp(self.wread(f))):
1036 1031 modified.append(f)
1037 1032 else:
1038 1033 fixup.append(f)
1039 1034 if list_clean:
1040 1035 clean.append(f)
1041 1036
1042 1037 # update dirstate for files that are actually clean
1043 1038 if fixup:
1044 1039 wlock = None
1045 1040 try:
1046 1041 try:
1047 1042 wlock = self.wlock(False)
1048 1043 except lock.LockException:
1049 1044 pass
1050 1045 if wlock:
1051 1046 for f in fixup:
1052 1047 self.dirstate.normal(f)
1053 1048 finally:
1054 1049 del wlock
1055 1050 else:
1056 1051 # we are comparing working dir against non-parent
1057 1052 # generate a pseudo-manifest for the working dir
1058 1053 # XXX: create it in dirstate.py ?
1059 1054 mf2 = mfmatches(self.dirstate.parents()[0])
1060 1055 is_exec = util.execfunc(self.root, mf2.execf)
1061 1056 is_link = util.linkfunc(self.root, mf2.linkf)
1062 1057 for f in lookup + modified + added:
1063 1058 mf2[f] = ""
1064 1059 mf2.set(f, is_exec(f), is_link(f))
1065 1060 for f in removed:
1066 1061 if f in mf2:
1067 1062 del mf2[f]
1068 1063
1069 1064 else:
1070 1065 # we are comparing two revisions
1071 1066 mf2 = mfmatches(node2)
1072 1067
1073 1068 if not compareworking:
1074 1069 # flush lists from dirstate before comparing manifests
1075 1070 modified, added, clean = [], [], []
1076 1071
1077 1072 # make sure to sort the files so we talk to the disk in a
1078 1073 # reasonable order
1079 1074 mf2keys = mf2.keys()
1080 1075 mf2keys.sort()
1081 1076 getnode = lambda fn: mf1.get(fn, nullid)
1082 1077 for fn in mf2keys:
1083 1078 if fn in mf1:
1084 1079 if (mf1.flags(fn) != mf2.flags(fn) or
1085 1080 (mf1[fn] != mf2[fn] and
1086 1081 (mf2[fn] != "" or fcmp(fn, getnode)))):
1087 1082 modified.append(fn)
1088 1083 elif list_clean:
1089 1084 clean.append(fn)
1090 1085 del mf1[fn]
1091 1086 else:
1092 1087 added.append(fn)
1093 1088
1094 1089 removed = mf1.keys()
1095 1090
1096 1091 # sort and return results:
1097 1092 for l in modified, added, removed, deleted, unknown, ignored, clean:
1098 1093 l.sort()
1099 1094 return (modified, added, removed, deleted, unknown, ignored, clean)
1100 1095
1101 1096 def add(self, list):
1102 1097 wlock = self.wlock()
1103 1098 try:
1104 1099 rejected = []
1105 1100 for f in list:
1106 1101 p = self.wjoin(f)
1107 1102 try:
1108 1103 st = os.lstat(p)
1109 1104 except:
1110 1105 self.ui.warn(_("%s does not exist!\n") % f)
1111 1106 rejected.append(f)
1112 1107 continue
1113 1108 if st.st_size > 10000000:
1114 1109 self.ui.warn(_("%s: files over 10MB may cause memory and"
1115 1110 " performance problems\n"
1116 1111 "(use 'hg revert %s' to unadd the file)\n")
1117 1112 % (f, f))
1118 1113 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1119 1114 self.ui.warn(_("%s not added: only files and symlinks "
1120 1115 "supported currently\n") % f)
1121 1116 rejected.append(p)
1122 1117 elif self.dirstate[f] in 'amn':
1123 1118 self.ui.warn(_("%s already tracked!\n") % f)
1124 1119 elif self.dirstate[f] == 'r':
1125 1120 self.dirstate.normallookup(f)
1126 1121 else:
1127 1122 self.dirstate.add(f)
1128 1123 return rejected
1129 1124 finally:
1130 1125 del wlock
1131 1126
1132 1127 def forget(self, list):
1133 1128 wlock = self.wlock()
1134 1129 try:
1135 1130 for f in list:
1136 1131 if self.dirstate[f] != 'a':
1137 1132 self.ui.warn(_("%s not added!\n") % f)
1138 1133 else:
1139 1134 self.dirstate.forget(f)
1140 1135 finally:
1141 1136 del wlock
1142 1137
1143 1138 def remove(self, list, unlink=False):
1144 1139 wlock = None
1145 1140 try:
1146 1141 if unlink:
1147 1142 for f in list:
1148 1143 try:
1149 1144 util.unlink(self.wjoin(f))
1150 1145 except OSError, inst:
1151 1146 if inst.errno != errno.ENOENT:
1152 1147 raise
1153 1148 wlock = self.wlock()
1154 1149 for f in list:
1155 1150 if unlink and os.path.exists(self.wjoin(f)):
1156 1151 self.ui.warn(_("%s still exists!\n") % f)
1157 1152 elif self.dirstate[f] == 'a':
1158 1153 self.dirstate.forget(f)
1159 1154 elif f not in self.dirstate:
1160 1155 self.ui.warn(_("%s not tracked!\n") % f)
1161 1156 else:
1162 1157 self.dirstate.remove(f)
1163 1158 finally:
1164 1159 del wlock
1165 1160
1166 1161 def undelete(self, list):
1167 1162 wlock = None
1168 1163 try:
1169 1164 manifests = [self.manifest.read(self.changelog.read(p)[0])
1170 1165 for p in self.dirstate.parents() if p != nullid]
1171 1166 wlock = self.wlock()
1172 1167 for f in list:
1173 1168 if self.dirstate[f] != 'r':
1174 1169 self.ui.warn("%s not removed!\n" % f)
1175 1170 else:
1176 1171 m = f in manifests[0] and manifests[0] or manifests[1]
1177 1172 t = self.file(f).read(m[f])
1178 1173 self.wwrite(f, t, m.flags(f))
1179 1174 self.dirstate.normal(f)
1180 1175 finally:
1181 1176 del wlock
1182 1177
1183 1178 def copy(self, source, dest):
1184 1179 wlock = None
1185 1180 try:
1186 1181 p = self.wjoin(dest)
1187 1182 if not (os.path.exists(p) or os.path.islink(p)):
1188 1183 self.ui.warn(_("%s does not exist!\n") % dest)
1189 1184 elif not (os.path.isfile(p) or os.path.islink(p)):
1190 1185 self.ui.warn(_("copy failed: %s is not a file or a "
1191 1186 "symbolic link\n") % dest)
1192 1187 else:
1193 1188 wlock = self.wlock()
1194 1189 if dest not in self.dirstate:
1195 1190 self.dirstate.add(dest)
1196 1191 self.dirstate.copy(source, dest)
1197 1192 finally:
1198 1193 del wlock
1199 1194
1200 1195 def heads(self, start=None):
1201 1196 heads = self.changelog.heads(start)
1202 1197 # sort the output in rev descending order
1203 1198 heads = [(-self.changelog.rev(h), h) for h in heads]
1204 1199 heads.sort()
1205 1200 return [n for (r, n) in heads]
1206 1201
1207 1202 def branchheads(self, branch=None, start=None):
1208 1203 branch = branch is None and self.workingctx().branch() or branch
1209 1204 branches = self.branchtags()
1210 1205 if branch not in branches:
1211 1206 return []
1212 1207 # The basic algorithm is this:
1213 1208 #
1214 1209 # Start from the branch tip since there are no later revisions that can
1215 1210 # possibly be in this branch, and the tip is a guaranteed head.
1216 1211 #
1217 1212 # Remember the tip's parents as the first ancestors, since these by
1218 1213 # definition are not heads.
1219 1214 #
1220 1215 # Step backwards from the branch tip through all the revisions. We are
1221 1216 # guaranteed by the rules of Mercurial that we will now be visiting the
1222 1217 # nodes in reverse topological order (children before parents).
1223 1218 #
1224 1219 # If a revision is one of the ancestors of a head then we can toss it
1225 1220 # out of the ancestors set (we've already found it and won't be
1226 1221 # visiting it again) and put its parents in the ancestors set.
1227 1222 #
1228 1223 # Otherwise, if a revision is in the branch it's another head, since it
1229 1224 # wasn't in the ancestor list of an existing head. So add it to the
1230 1225 # head list, and add its parents to the ancestor list.
1231 1226 #
1232 1227 # If it is not in the branch ignore it.
1233 1228 #
1234 1229 # Once we have a list of heads, use nodesbetween to filter out all the
1235 1230 # heads that cannot be reached from startrev. There may be a more
1236 1231 # efficient way to do this as part of the previous algorithm.
1237 1232
1238 1233 set = util.set
1239 1234 heads = [self.changelog.rev(branches[branch])]
1240 1235 # Don't care if ancestors contains nullrev or not.
1241 1236 ancestors = set(self.changelog.parentrevs(heads[0]))
1242 1237 for rev in xrange(heads[0] - 1, nullrev, -1):
1243 1238 if rev in ancestors:
1244 1239 ancestors.update(self.changelog.parentrevs(rev))
1245 1240 ancestors.remove(rev)
1246 1241 elif self.changectx(rev).branch() == branch:
1247 1242 heads.append(rev)
1248 1243 ancestors.update(self.changelog.parentrevs(rev))
1249 1244 heads = [self.changelog.node(rev) for rev in heads]
1250 1245 if start is not None:
1251 1246 heads = self.changelog.nodesbetween([start], heads)[2]
1252 1247 return heads
1253 1248
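The long comment above is precise but abstract, so here is the same reverse-topological sweep on a four-revision toy DAG (0 is the root, 3 is the branch tip, 2 is a second head reachable from 0; -1 plays the role of nullrev; all data invented):

    parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, -1)}
    branch_of = {0: 'default', 1: 'default', 2: 'default', 3: 'default'}

    heads = [3]                    # the branch tip is always a head
    ancestors = set(parents[3])    # its parents are known non-heads
    for rev in range(2, -1, -1):   # children are visited before parents
        if rev in ancestors:
            ancestors.update(parents[rev])
            ancestors.discard(rev)
        elif branch_of[rev] == 'default':
            heads.append(rev)      # not under any head found so far
            ancestors.update(parents[rev])
    print(heads)                   # [3, 2]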
1254 1249 def branches(self, nodes):
1255 1250 if not nodes:
1256 1251 nodes = [self.changelog.tip()]
1257 1252 b = []
1258 1253 for n in nodes:
1259 1254 t = n
1260 1255 while 1:
1261 1256 p = self.changelog.parents(n)
1262 1257 if p[1] != nullid or p[0] == nullid:
1263 1258 b.append((t, n, p[0], p[1]))
1264 1259 break
1265 1260 n = p[0]
1266 1261 return b
1267 1262
1268 1263 def between(self, pairs):
1269 1264 r = []
1270 1265
1271 1266 for top, bottom in pairs:
1272 1267 n, l, i = top, [], 0
1273 1268 f = 1
1274 1269
1275 1270 while n != bottom:
1276 1271 p = self.changelog.parents(n)[0]
1277 1272 if i == f:
1278 1273 l.append(n)
1279 1274 f = f * 2
1280 1275 n = p
1281 1276 i += 1
1282 1277
1283 1278 r.append(l)
1284 1279
1285 1280 return r
1286 1281
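between() is the server half of the discovery binary search: for each (top, bottom) pair it walks the first-parent chain and reports the nodes at exponentially growing distances 1, 2, 4, 8, ... from top, which lets findincoming below narrow an unknown range in logarithmically many round trips. The same loop on a toy linear chain where every node's parent is n - 1 (sample is an invented name):

    def sample(top, bottom, parent):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            p = parent(n)
            if i == f:       # keep nodes at distances 1, 2, 4, 8, ...
                l.append(n)
                f *= 2
            n = p
            i += 1
        return l

    print(sample(9, 0, lambda n: n - 1))  # [8, 7, 5, 1]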
1287 1282 def findincoming(self, remote, base=None, heads=None, force=False):
1288 1283 """Return list of roots of the subsets of missing nodes from remote
1289 1284
1290 1285 If base dict is specified, assume that these nodes and their parents
1291 1286 exist on the remote side and that no child of a node of base exists
1292 1287 in both remote and self.
1293 1288 Furthermore base will be updated to include the nodes that exist
1294 1289 in both self and remote but whose children do not exist in both.
1295 1290 If a list of heads is specified, return only nodes which are heads
1296 1291 or ancestors of these heads.
1297 1292
1298 1293 All the ancestors of base are in self and in remote.
1299 1294 All the descendants of the list returned are missing in self.
1300 1295 (and so we know that the rest of the nodes are missing in remote, see
1301 1296 outgoing)
1302 1297 """
1303 1298 m = self.changelog.nodemap
1304 1299 search = []
1305 1300 fetch = {}
1306 1301 seen = {}
1307 1302 seenbranch = {}
1308 1303 if base == None:
1309 1304 base = {}
1310 1305
1311 1306 if not heads:
1312 1307 heads = remote.heads()
1313 1308
1314 1309 if self.changelog.tip() == nullid:
1315 1310 base[nullid] = 1
1316 1311 if heads != [nullid]:
1317 1312 return [nullid]
1318 1313 return []
1319 1314
1320 1315 # assume we're closer to the tip than the root
1321 1316 # and start by examining the heads
1322 1317 self.ui.status(_("searching for changes\n"))
1323 1318
1324 1319 unknown = []
1325 1320 for h in heads:
1326 1321 if h not in m:
1327 1322 unknown.append(h)
1328 1323 else:
1329 1324 base[h] = 1
1330 1325
1331 1326 if not unknown:
1332 1327 return []
1333 1328
1334 1329 req = dict.fromkeys(unknown)
1335 1330 reqcnt = 0
1336 1331
1337 1332 # search through remote branches
1338 1333 # a 'branch' here is a linear segment of history, with four parts:
1339 1334 # head, root, first parent, second parent
1340 1335 # (a branch always has two parents (or none) by definition)
1341 1336 unknown = remote.branches(unknown)
1342 1337 while unknown:
1343 1338 r = []
1344 1339 while unknown:
1345 1340 n = unknown.pop(0)
1346 1341 if n[0] in seen:
1347 1342 continue
1348 1343
1349 1344 self.ui.debug(_("examining %s:%s\n")
1350 1345 % (short(n[0]), short(n[1])))
1351 1346 if n[0] == nullid: # found the end of the branch
1352 1347 pass
1353 1348 elif n in seenbranch:
1354 1349 self.ui.debug(_("branch already found\n"))
1355 1350 continue
1356 1351 elif n[1] and n[1] in m: # do we know the base?
1357 1352 self.ui.debug(_("found incomplete branch %s:%s\n")
1358 1353 % (short(n[0]), short(n[1])))
1359 1354 search.append(n) # schedule branch range for scanning
1360 1355 seenbranch[n] = 1
1361 1356 else:
1362 1357 if n[1] not in seen and n[1] not in fetch:
1363 1358 if n[2] in m and n[3] in m:
1364 1359 self.ui.debug(_("found new changeset %s\n") %
1365 1360 short(n[1]))
1366 1361 fetch[n[1]] = 1 # earliest unknown
1367 1362 for p in n[2:4]:
1368 1363 if p in m:
1369 1364 base[p] = 1 # latest known
1370 1365
1371 1366 for p in n[2:4]:
1372 1367 if p not in req and p not in m:
1373 1368 r.append(p)
1374 1369 req[p] = 1
1375 1370 seen[n[0]] = 1
1376 1371
1377 1372 if r:
1378 1373 reqcnt += 1
1379 1374 self.ui.debug(_("request %d: %s\n") %
1380 1375 (reqcnt, " ".join(map(short, r))))
1381 1376 for p in xrange(0, len(r), 10):
1382 1377 for b in remote.branches(r[p:p+10]):
1383 1378 self.ui.debug(_("received %s:%s\n") %
1384 1379 (short(b[0]), short(b[1])))
1385 1380 unknown.append(b)
1386 1381
1387 1382 # do binary search on the branches we found
1388 1383 while search:
1389 1384 n = search.pop(0)
1390 1385 reqcnt += 1
1391 1386 l = remote.between([(n[0], n[1])])[0]
1392 1387 l.append(n[1])
1393 1388 p = n[0]
1394 1389 f = 1
1395 1390 for i in l:
1396 1391 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1397 1392 if i in m:
1398 1393 if f <= 2:
1399 1394 self.ui.debug(_("found new branch changeset %s\n") %
1400 1395 short(p))
1401 1396 fetch[p] = 1
1402 1397 base[i] = 1
1403 1398 else:
1404 1399 self.ui.debug(_("narrowed branch search to %s:%s\n")
1405 1400 % (short(p), short(i)))
1406 1401 search.append((p, i))
1407 1402 break
1408 1403 p, f = i, f * 2
1409 1404
1410 1405 # sanity check our fetch list
1411 1406 for f in fetch.keys():
1412 1407 if f in m:
1413 1408 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1414 1409
1415 1410 if base.keys() == [nullid]:
1416 1411 if force:
1417 1412 self.ui.warn(_("warning: repository is unrelated\n"))
1418 1413 else:
1419 1414 raise util.Abort(_("repository is unrelated"))
1420 1415
1421 1416 self.ui.debug(_("found new changesets starting at ") +
1422 1417 " ".join([short(f) for f in fetch]) + "\n")
1423 1418
1424 1419 self.ui.debug(_("%d total queries\n") % reqcnt)
1425 1420
1426 1421 return fetch.keys()
1427 1422
1428 1423 def findoutgoing(self, remote, base=None, heads=None, force=False):
1429 1424 """Return list of nodes that are roots of subsets not in remote
1430 1425
1431 1426 If base dict is specified, assume that these nodes and their parents
1432 1427 exist on the remote side.
1433 1428 If a list of heads is specified, return only nodes which are heads
1434 1429 or ancestors of these heads, and return a second element which
1435 1430 contains all remote heads which get new children.
1436 1431 """
1437 1432 if base == None:
1438 1433 base = {}
1439 1434 self.findincoming(remote, base, heads, force=force)
1440 1435
1441 1436 self.ui.debug(_("common changesets up to ")
1442 1437 + " ".join(map(short, base.keys())) + "\n")
1443 1438
1444 1439 remain = dict.fromkeys(self.changelog.nodemap)
1445 1440
1446 1441 # prune everything remote has from the tree
1447 1442 del remain[nullid]
1448 1443 remove = base.keys()
1449 1444 while remove:
1450 1445 n = remove.pop(0)
1451 1446 if n in remain:
1452 1447 del remain[n]
1453 1448 for p in self.changelog.parents(n):
1454 1449 remove.append(p)
1455 1450
1456 1451 # find every node whose parents have been pruned
1457 1452 subset = []
1458 1453 # find every remote head that will get new children
1459 1454 updated_heads = {}
1460 1455 for n in remain:
1461 1456 p1, p2 = self.changelog.parents(n)
1462 1457 if p1 not in remain and p2 not in remain:
1463 1458 subset.append(n)
1464 1459 if heads:
1465 1460 if p1 in heads:
1466 1461 updated_heads[p1] = True
1467 1462 if p2 in heads:
1468 1463 updated_heads[p2] = True
1469 1464
1470 1465 # this is the set of all roots we have to push
1471 1466 if heads:
1472 1467 return subset, updated_heads.keys()
1473 1468 else:
1474 1469 return subset
1475 1470
1476 1471 def pull(self, remote, heads=None, force=False):
1477 1472 lock = self.lock()
1478 1473 try:
1479 1474 fetch = self.findincoming(remote, heads=heads, force=force)
1480 1475 if fetch == [nullid]:
1481 1476 self.ui.status(_("requesting all changes\n"))
1482 1477
1483 1478 if not fetch:
1484 1479 self.ui.status(_("no changes found\n"))
1485 1480 return 0
1486 1481
1487 1482 if heads is None:
1488 1483 cg = remote.changegroup(fetch, 'pull')
1489 1484 else:
1490 1485 if 'changegroupsubset' not in remote.capabilities:
1491 1486 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1492 1487 cg = remote.changegroupsubset(fetch, heads, 'pull')
1493 1488 return self.addchangegroup(cg, 'pull', remote.url())
1494 1489 finally:
1495 1490 del lock
1496 1491
1497 1492 def push(self, remote, force=False, revs=None):
1498 1493 # there are two ways to push to remote repo:
1499 1494 #
1500 1495 # addchangegroup assumes local user can lock remote
1501 1496 # repo (local filesystem, old ssh servers).
1502 1497 #
1503 1498 # unbundle assumes local user cannot lock remote repo (new ssh
1504 1499 # servers, http servers).
1505 1500
1506 1501 if remote.capable('unbundle'):
1507 1502 return self.push_unbundle(remote, force, revs)
1508 1503 return self.push_addchangegroup(remote, force, revs)
1509 1504
1510 1505 def prepush(self, remote, force, revs):
1511 1506 base = {}
1512 1507 remote_heads = remote.heads()
1513 1508 inc = self.findincoming(remote, base, remote_heads, force=force)
1514 1509
1515 1510 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1516 1511 if revs is not None:
1517 1512 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1518 1513 else:
1519 1514 bases, heads = update, self.changelog.heads()
1520 1515
1521 1516 if not bases:
1522 1517 self.ui.status(_("no changes found\n"))
1523 1518 return None, 1
1524 1519 elif not force:
1525 1520 # check if we're creating new remote heads
1526 1521 # to be a remote head after push, node must be either
1527 1522 # - unknown locally
1528 1523 # - a local outgoing head descended from update
1529 1524 # - a remote head that's known locally and not
1530 1525 # ancestral to an outgoing head
1531 1526
1532 1527 warn = 0
1533 1528
1534 1529 if remote_heads == [nullid]:
1535 1530 warn = 0
1536 1531 elif not revs and len(heads) > len(remote_heads):
1537 1532 warn = 1
1538 1533 else:
1539 1534 newheads = list(heads)
1540 1535 for r in remote_heads:
1541 1536 if r in self.changelog.nodemap:
1542 1537 desc = self.changelog.heads(r, heads)
1543 1538 l = [h for h in heads if h in desc]
1544 1539 if not l:
1545 1540 newheads.append(r)
1546 1541 else:
1547 1542 newheads.append(r)
1548 1543 if len(newheads) > len(remote_heads):
1549 1544 warn = 1
1550 1545
1551 1546 if warn:
1552 1547 self.ui.warn(_("abort: push creates new remote heads!\n"))
1553 1548 self.ui.status(_("(did you forget to merge?"
1554 1549 " use push -f to force)\n"))
1555 1550 return None, 0
1556 1551 elif inc:
1557 1552 self.ui.warn(_("note: unsynced remote changes!\n"))
1558 1553
1559 1554
1560 1555 if revs is None:
1561 1556 cg = self.changegroup(update, 'push')
1562 1557 else:
1563 1558 cg = self.changegroupsubset(update, revs, 'push')
1564 1559 return cg, remote_heads
1565 1560
1566 1561 def push_addchangegroup(self, remote, force, revs):
1567 1562 lock = remote.lock()
1568 1563 try:
1569 1564 ret = self.prepush(remote, force, revs)
1570 1565 if ret[0] is not None:
1571 1566 cg, remote_heads = ret
1572 1567 return remote.addchangegroup(cg, 'push', self.url())
1573 1568 return ret[1]
1574 1569 finally:
1575 1570 del lock
1576 1571
1577 1572 def push_unbundle(self, remote, force, revs):
1578 1573 # local repo finds heads on server, finds out what revs it
1579 1574 # must push. once revs transferred, if server finds it has
1580 1575 # different heads (someone else won commit/push race), server
1581 1576 # aborts.
1582 1577
1583 1578 ret = self.prepush(remote, force, revs)
1584 1579 if ret[0] is not None:
1585 1580 cg, remote_heads = ret
1586 1581 if force: remote_heads = ['force']
1587 1582 return remote.unbundle(cg, remote_heads, 'push')
1588 1583 return ret[1]
1589 1584
1590 1585 def changegroupinfo(self, nodes, source):
1591 1586 if self.ui.verbose or source == 'bundle':
1592 1587 self.ui.status(_("%d changesets found\n") % len(nodes))
1593 1588 if self.ui.debugflag:
1594 1589 self.ui.debug(_("List of changesets:\n"))
1595 1590 for node in nodes:
1596 1591 self.ui.debug("%s\n" % hex(node))
1597 1592
1598 1593 def changegroupsubset(self, bases, heads, source, extranodes=None):
1599 1594 """This function generates a changegroup consisting of all the nodes
1600 1595 that are descendants of any of the bases, and ancestors of any of
1601 1596 the heads.
1602 1597
1603 1598 It is fairly complex as determining which filenodes and which
1604 1599 manifest nodes need to be included for the changeset to be complete
1605 1600 is non-trivial.
1606 1601
1607 1602 Another wrinkle is doing the reverse, figuring out which changeset in
1608 1603 the changegroup a particular filenode or manifestnode belongs to.
1609 1604
1610 1605 The caller can specify some nodes that must be included in the
1611 1606 changegroup using the extranodes argument. It should be a dict
1612 1607 where the keys are the filenames (or 1 for the manifest), and the
1613 1608 values are lists of (node, linknode) tuples, where node is a wanted
1614 1609 node and linknode is the changelog node that should be transmitted as
1615 1610 the linkrev.
1616 1611 """
1617 1612
1618 1613 self.hook('preoutgoing', throw=True, source=source)
1619 1614
1620 1615 # Set up some initial variables
1621 1616 # Make it easy to refer to self.changelog
1622 1617 cl = self.changelog
1623 1618 # msng is short for missing - compute the list of changesets in this
1624 1619 # changegroup.
1625 1620 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1626 1621 self.changegroupinfo(msng_cl_lst, source)
1627 1622 # Some bases may turn out to be superfluous, and some heads may be
1628 1623 # too. nodesbetween will return the minimal set of bases and heads
1629 1624 # necessary to re-create the changegroup.
1630 1625
1631 1626 # Known heads are the list of heads that it is assumed the recipient
1632 1627 # of this changegroup will know about.
1633 1628 knownheads = {}
1634 1629 # We assume that all parents of bases are known heads.
1635 1630 for n in bases:
1636 1631 for p in cl.parents(n):
1637 1632 if p != nullid:
1638 1633 knownheads[p] = 1
1639 1634 knownheads = knownheads.keys()
1640 1635 if knownheads:
1641 1636 # Now that we know what heads are known, we can compute which
1642 1637 # changesets are known. The recipient must know about all
1643 1638 # changesets required to reach the known heads from the null
1644 1639 # changeset.
1645 1640 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1646 1641 junk = None
1647 1642 # Transform the list into an ersatz set.
1648 1643 has_cl_set = dict.fromkeys(has_cl_set)
1649 1644 else:
1650 1645 # If there were no known heads, the recipient cannot be assumed to
1651 1646 # know about any changesets.
1652 1647 has_cl_set = {}
1653 1648
1654 1649 # Make it easy to refer to self.manifest
1655 1650 mnfst = self.manifest
1656 1651 # We don't know which manifests are missing yet
1657 1652 msng_mnfst_set = {}
1658 1653 # Nor do we know which filenodes are missing.
1659 1654 msng_filenode_set = {}
1660 1655
1661 1656 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1662 1657 junk = None
1663 1658
1664 1659 # A changeset always belongs to itself, so the changenode lookup
1665 1660 # function for a changenode is identity.
1666 1661 def identity(x):
1667 1662 return x
1668 1663
1669 1664 # A function generating function. Sets up an environment for the
1670 1665 # inner function.
1671 1666 def cmp_by_rev_func(revlog):
1672 1667 # Compare two nodes by their revision number in the environment's
1673 1668 # revision history. Since the revision number is both the most
1674 1669 # efficient order to read the nodes in and a topological sorting
1675 1670 # of the nodes, this function is often useful.
1676 1671 def cmp_by_rev(a, b):
1677 1672 return cmp(revlog.rev(a), revlog.rev(b))
1678 1673 return cmp_by_rev
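# For example (hypothetical usage), a list of changelog nodes can be
# put into on-disk/topological order with:
#
#     nodes.sort(cmp_by_rev_func(cl))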
1679 1674
1680 1675 # If we determine that a particular file or manifest node must be a
1681 1676 # node that the recipient of the changegroup will already have, we can
1682 1677 # also assume the recipient will have all the parents. This function
1683 1678 # prunes them from the set of missing nodes.
1684 1679 def prune_parents(revlog, hasset, msngset):
1685 1680 haslst = hasset.keys()
1686 1681 haslst.sort(cmp_by_rev_func(revlog))
1687 1682 for node in haslst:
1688 1683 parentlst = [p for p in revlog.parents(node) if p != nullid]
1689 1684 while parentlst:
1690 1685 n = parentlst.pop()
1691 1686 if n not in hasset:
1692 1687 hasset[n] = 1
1693 1688 p = [p for p in revlog.parents(n) if p != nullid]
1694 1689 parentlst.extend(p)
1695 1690 for n in hasset:
1696 1691 msngset.pop(n, None)
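# A worked example with hypothetical nodes: if hasset starts as {D},
# and D's ancestors are B and A, the loop adds B and A to hasset, and
# all three are then popped from msngset -- knowing the recipient has
# a node implies it has that node's entire ancestry.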
1697 1692
1698 1693 # This is a function generating function used to set up an environment
1699 1694 # for the inner function to execute in.
1700 1695 def manifest_and_file_collector(changedfileset):
1701 1696 # This is an information gathering function that gathers
1702 1697 # information from each changeset node that goes out as part of
1703 1698 # the changegroup. The information gathered is a list of which
1704 1699 # manifest nodes are potentially required (the recipient may
1705 1700 # already have them) and a total list of all files which were
1706 1701 # changed in any changeset in the changegroup.
1707 1702 #
1708 1703 # We also remember the first changenode each manifest is
1709 1704 # referenced by, so we can later determine which changenode
1710 1705 # 'owns' the manifest.
1711 1706 def collect_manifests_and_files(clnode):
1712 1707 c = cl.read(clnode)
1713 1708 for f in c[3]:
1714 1709 # This is to make sure we only have one instance of each
1715 1710 # filename string for each filename.
1716 1711 changedfileset.setdefault(f, f)
1717 1712 msng_mnfst_set.setdefault(c[0], clnode)
1718 1713 return collect_manifests_and_files
1719 1714
1720 1715 # Figure out which manifest nodes (of the ones we think might be part
1721 1716 # of the changegroup) the recipient must know about and remove them
1722 1717 # from the changegroup.
1723 1718 def prune_manifests():
1724 1719 has_mnfst_set = {}
1725 1720 for n in msng_mnfst_set:
1726 1721 # If a 'missing' manifest thinks it belongs to a changenode
1727 1722 # the recipient is assumed to have, obviously the recipient
1728 1723 # must have that manifest.
1729 1724 linknode = cl.node(mnfst.linkrev(n))
1730 1725 if linknode in has_cl_set:
1731 1726 has_mnfst_set[n] = 1
1732 1727 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1733 1728
1734 1729 # Use the information collected in collect_manifests_and_files to say
1735 1730 # which changenode any manifestnode belongs to.
1736 1731 def lookup_manifest_link(mnfstnode):
1737 1732 return msng_mnfst_set[mnfstnode]
1738 1733
1739 1734 # A function generating function that sets up the initial environment
1740 1735 # for the inner function.
1741 1736 def filenode_collector(changedfiles):
1742 1737 next_rev = [0]
1743 1738 # This gathers information from each manifestnode included in the
1744 1739 # changegroup about which filenodes the manifest node references
1745 1740 # so we can include those in the changegroup too.
1746 1741 #
1747 1742 # It also remembers which changenode each filenode belongs to. It
1748 1743 # does this by assuming a filenode belongs to the changenode
1749 1744 # that the first manifest referencing it belongs to.
1750 1745 def collect_msng_filenodes(mnfstnode):
1751 1746 r = mnfst.rev(mnfstnode)
1752 1747 if r == next_rev[0]:
1753 1748 # If the last rev we looked at was the one just previous,
1754 1749 # we only need to see a diff.
1755 1750 deltamf = mnfst.readdelta(mnfstnode)
1756 1751 # For each line in the delta
1757 1752 for f, fnode in deltamf.items():
1758 1753 f = changedfiles.get(f, None)
1759 1754 # And if the file is in the list of files we care
1760 1755 # about.
1761 1756 if f is not None:
1762 1757 # Get the changenode this manifest belongs to
1763 1758 clnode = msng_mnfst_set[mnfstnode]
1764 1759 # Create the set of filenodes for the file if
1765 1760 # there isn't one already.
1766 1761 ndset = msng_filenode_set.setdefault(f, {})
1767 1762 # And set the filenode's changelog node to the
1768 1763 # manifest's if it hasn't been set already.
1769 1764 ndset.setdefault(fnode, clnode)
1770 1765 else:
1771 1766 # Otherwise we need a full manifest.
1772 1767 m = mnfst.read(mnfstnode)
1774 1769 # For every file we care about.
1774 1769 for f in changedfiles:
1775 1770 fnode = m.get(f, None)
1776 1771 # If it's in the manifest
1777 1772 if fnode is not None:
1778 1773 # See comments above.
1779 1774 clnode = msng_mnfst_set[mnfstnode]
1780 1775 ndset = msng_filenode_set.setdefault(f, {})
1781 1776 ndset.setdefault(fnode, clnode)
1782 1777 # Remember the revision we hope to see next.
1783 1778 next_rev[0] = r + 1
1784 1779 return collect_msng_filenodes
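# For example, if the manifest nodes arrive with revisions 5, 6, 7,
# each step after the first reads only a delta; arriving as 5, 9, 7,
# the out-of-order nodes each force a full manifest read. This is why
# gengroup sorts the manifest nodes by revision before grouping them.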
1785 1780
1786 1781 # We have a list of filenodes we think we need for a file; let's remove
1787 1782 # all those we know the recipient must have.
1788 1783 def prune_filenodes(f, filerevlog):
1789 1784 msngset = msng_filenode_set[f]
1790 1785 hasset = {}
1791 1786 # If a 'missing' filenode thinks it belongs to a changenode we
1792 1787 # assume the recipient must have, then the recipient must have
1793 1788 # that filenode.
1794 1789 for n in msngset:
1795 1790 clnode = cl.node(filerevlog.linkrev(n))
1796 1791 if clnode in has_cl_set:
1797 1792 hasset[n] = 1
1798 1793 prune_parents(filerevlog, hasset, msngset)
1799 1794
1800 1795 # A function generating function that sets up a context for the
1801 1796 # inner function.
1802 1797 def lookup_filenode_link_func(fname):
1803 1798 msngset = msng_filenode_set[fname]
1804 1799 # Lookup the changenode the filenode belongs to.
1805 1800 def lookup_filenode_link(fnode):
1806 1801 return msngset[fnode]
1807 1802 return lookup_filenode_link
1808 1803
1809 1804 # Add the nodes that were explicitly requested.
1810 1805 def add_extra_nodes(name, nodes):
1811 1806 if not extranodes or name not in extranodes:
1812 1807 return
1813 1808
1814 1809 for node, linknode in extranodes[name]:
1815 1810 if node not in nodes:
1816 1811 nodes[node] = linknode
1817 1812
1818 1813 # Now that we have all these utility functions to help out and
1819 1814 # logically divide up the task, generate the group.
1820 1815 def gengroup():
1821 1816 # The set of changed files starts empty.
1822 1817 changedfiles = {}
1823 1818 # Create a changenode group generator that will call our functions
1824 1819 # back to lookup the owning changenode and collect information.
1825 1820 group = cl.group(msng_cl_lst, identity,
1826 1821 manifest_and_file_collector(changedfiles))
1827 1822 for chnk in group:
1828 1823 yield chnk
1829 1824
1830 1825 # The list of manifests has been collected by the generator
1831 1826 # calling our functions back.
1832 1827 prune_manifests()
1833 1828 add_extra_nodes(1, msng_mnfst_set)
1834 1829 msng_mnfst_lst = msng_mnfst_set.keys()
1835 1830 # Sort the manifestnodes by revision number.
1836 1831 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1837 1832 # Create a generator for the manifestnodes that calls our lookup
1838 1833 # and data collection functions back.
1839 1834 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1840 1835 filenode_collector(changedfiles))
1841 1836 for chnk in group:
1842 1837 yield chnk
1843 1838
1844 1839 # These are no longer needed, dereference and toss the memory for
1845 1840 # them.
1846 1841 msng_mnfst_lst = None
1847 1842 msng_mnfst_set.clear()
1848 1843
1849 1844 if extranodes:
1850 1845 for fname in extranodes:
1851 1846 if isinstance(fname, int):
1852 1847 continue
1853 1848 add_extra_nodes(fname,
1854 1849 msng_filenode_set.setdefault(fname, {}))
1855 1850 changedfiles[fname] = 1
1856 1851 changedfiles = changedfiles.keys()
1857 1852 changedfiles.sort()
1858 1853 # Go through all our files in order sorted by name.
1859 1854 for fname in changedfiles:
1860 1855 filerevlog = self.file(fname)
1861 1856 if filerevlog.count() == 0:
1862 1857 raise util.Abort(_("empty or missing revlog for %s") % fname)
1863 1858 # Toss out the filenodes that the recipient isn't really
1864 1859 # missing.
1865 1860 if fname in msng_filenode_set:
1866 1861 prune_filenodes(fname, filerevlog)
1867 1862 msng_filenode_lst = msng_filenode_set[fname].keys()
1868 1863 else:
1869 1864 msng_filenode_lst = []
1870 1865 # If any filenodes are left, generate the group for them,
1871 1866 # otherwise don't bother.
1872 1867 if len(msng_filenode_lst) > 0:
1873 1868 yield changegroup.chunkheader(len(fname))
1874 1869 yield fname
1875 1870 # Sort the filenodes by their revision number.
1876 1871 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1877 1872 # Create a group generator and only pass in a changenode
1878 1873 # lookup function as we need to collect no information
1879 1874 # from filenodes.
1880 1875 group = filerevlog.group(msng_filenode_lst,
1881 1876 lookup_filenode_link_func(fname))
1882 1877 for chnk in group:
1883 1878 yield chnk
1884 1879 if fname in msng_filenode_set:
1885 1880 # Don't need this anymore, toss it to free memory.
1886 1881 del msng_filenode_set[fname]
1887 1882 # Signal that no more groups are left.
1888 1883 yield changegroup.closechunk()
1889 1884
1890 1885 if msng_cl_lst:
1891 1886 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1892 1887
1893 1888 return util.chunkbuffer(gengroup())
1894 1889
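Everything yielded by gengroup is a flat stream of length-prefixed chunks. A minimal consumer sketch, assuming the cg1 framing used by changegroup.chunkheader and changegroup.closechunk (each chunk is preceded by a 4-byte big-endian length that counts the length field itself; a zero header closes the current group):

    import struct

    def read_group(fp):
        # Yield the chunks of one group from a changegroup stream.
        while True:
            header = fp.read(4)
            length = struct.unpack(">l", header)[0]
            if length <= 4:
                return  # empty chunk: end of this group
            yield fp.read(length - 4)
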
1895 1890 def changegroup(self, basenodes, source):
1896 1891 """Generate a changegroup of all nodes that we have that a recipient
1897 1892 doesn't.
1898 1893
1899 1894 This is much easier than the previous function as we can assume that
1900 1895 the recipient has any changenode we aren't sending them."""
1901 1896
1902 1897 self.hook('preoutgoing', throw=True, source=source)
1903 1898
1904 1899 cl = self.changelog
1905 1900 nodes = cl.nodesbetween(basenodes, None)[0]
1906 1901 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1907 1902 self.changegroupinfo(nodes, source)
1908 1903
1909 1904 def identity(x):
1910 1905 return x
1911 1906
1912 1907 def gennodelst(revlog):
1913 1908 for r in xrange(0, revlog.count()):
1914 1909 n = revlog.node(r)
1915 1910 if revlog.linkrev(n) in revset:
1916 1911 yield n
1917 1912
1918 1913 def changed_file_collector(changedfileset):
1919 1914 def collect_changed_files(clnode):
1920 1915 c = cl.read(clnode)
1921 1916 for fname in c[3]:
1922 1917 changedfileset[fname] = 1
1923 1918 return collect_changed_files
1924 1919
1925 1920 def lookuprevlink_func(revlog):
1926 1921 def lookuprevlink(n):
1927 1922 return cl.node(revlog.linkrev(n))
1928 1923 return lookuprevlink
1929 1924
1930 1925 def gengroup():
1931 1926 # construct a list of all changed files
1932 1927 changedfiles = {}
1933 1928
1934 1929 for chnk in cl.group(nodes, identity,
1935 1930 changed_file_collector(changedfiles)):
1936 1931 yield chnk
1937 1932 changedfiles = changedfiles.keys()
1938 1933 changedfiles.sort()
1939 1934
1940 1935 mnfst = self.manifest
1941 1936 nodeiter = gennodelst(mnfst)
1942 1937 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1943 1938 yield chnk
1944 1939
1945 1940 for fname in changedfiles:
1946 1941 filerevlog = self.file(fname)
1947 1942 if filerevlog.count() == 0:
1948 1943 raise util.Abort(_("empty or missing revlog for %s") % fname)
1949 1944 nodeiter = gennodelst(filerevlog)
1950 1945 nodeiter = list(nodeiter)
1951 1946 if nodeiter:
1952 1947 yield changegroup.chunkheader(len(fname))
1953 1948 yield fname
1954 1949 lookup = lookuprevlink_func(filerevlog)
1955 1950 for chnk in filerevlog.group(nodeiter, lookup):
1956 1951 yield chnk
1957 1952
1958 1953 yield changegroup.closechunk()
1959 1954
1960 1955 if nodes:
1961 1956 self.hook('outgoing', node=hex(nodes[0]), source=source)
1962 1957
1963 1958 return util.chunkbuffer(gengroup())
1964 1959
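A hedged usage sketch (repo, base and out are hypothetical): since the recipient is assumed to have the base and everything before it, this is the cheap path used for ordinary pushes and bundles:

    # Generate a changegroup of everything not reachable from 'base',
    # then drain the chunkbuffer returned above.
    cg = repo.changegroup([base], 'bundle')
    while True:
        chunk = cg.read(4096)
        if not chunk:
            break
        out.write(chunk)
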
1965 1960 def addchangegroup(self, source, srctype, url, emptyok=False):
1966 1961 """add changegroup to repo.
1967 1962
1968 1963 return values:
1969 1964 - nothing changed or no source: 0
1970 1965 - more heads than before: 1+added heads (2..n)
1971 1966 - fewer heads than before: -1-removed heads (-2..-n)
1972 1967 - number of heads stays the same: 1
1973 1968 """
1974 1969 def csmap(x):
1975 1970 self.ui.debug(_("add changeset %s\n") % short(x))
1976 1971 return cl.count()
1977 1972
1978 1973 def revmap(x):
1979 1974 return cl.rev(x)
1980 1975
1981 1976 if not source:
1982 1977 return 0
1983 1978
1984 1979 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1985 1980
1986 1981 changesets = files = revisions = 0
1987 1982
1988 1983 # write changelog data to temp files so concurrent readers will not see
1989 1984 # inconsistent view
1990 1985 cl = self.changelog
1991 1986 cl.delayupdate()
1992 1987 oldheads = len(cl.heads())
1993 1988
1994 1989 tr = self.transaction()
1995 1990 try:
1996 1991 trp = weakref.proxy(tr)
1997 1992 # pull off the changeset group
1998 1993 self.ui.status(_("adding changesets\n"))
1999 1994 cor = cl.count() - 1
2000 1995 chunkiter = changegroup.chunkiter(source)
2001 1996 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2002 1997 raise util.Abort(_("received changelog group is empty"))
2003 1998 cnr = cl.count() - 1
2004 1999 changesets = cnr - cor
2005 2000
2006 2001 # pull off the manifest group
2007 2002 self.ui.status(_("adding manifests\n"))
2008 2003 chunkiter = changegroup.chunkiter(source)
2009 2004 # no need to check for empty manifest group here:
2010 2005 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2011 2006 # no new manifest will be created and the manifest group will
2012 2007 # be empty during the pull
2013 2008 self.manifest.addgroup(chunkiter, revmap, trp)
2014 2009
2015 2010 # process the files
2016 2011 self.ui.status(_("adding file changes\n"))
2017 2012 while 1:
2018 2013 f = changegroup.getchunk(source)
2019 2014 if not f:
2020 2015 break
2021 2016 self.ui.debug(_("adding %s revisions\n") % f)
2022 2017 fl = self.file(f)
2023 2018 o = fl.count()
2024 2019 chunkiter = changegroup.chunkiter(source)
2025 2020 if fl.addgroup(chunkiter, revmap, trp) is None:
2026 2021 raise util.Abort(_("received file revlog group is empty"))
2027 2022 revisions += fl.count() - o
2028 2023 files += 1
2029 2024
2030 2025 # make changelog see real files again
2031 2026 cl.finalize(trp)
2032 2027
2033 2028 newheads = len(self.changelog.heads())
2034 2029 heads = ""
2035 2030 if oldheads and newheads != oldheads:
2036 2031 heads = _(" (%+d heads)") % (newheads - oldheads)
2037 2032
2038 2033 self.ui.status(_("added %d changesets"
2039 2034 " with %d changes to %d files%s\n")
2040 2035 % (changesets, revisions, files, heads))
2041 2036
2042 2037 if changesets > 0:
2043 2038 self.hook('pretxnchangegroup', throw=True,
2044 2039 node=hex(self.changelog.node(cor+1)), source=srctype,
2045 2040 url=url)
2046 2041
2047 2042 tr.close()
2048 2043 finally:
2049 2044 del tr
2050 2045
2051 2046 if changesets > 0:
2052 2047 # forcefully update the on-disk branch cache
2053 2048 self.ui.debug(_("updating the branch cache\n"))
2054 2049 self.branchtags()
2055 2050 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2056 2051 source=srctype, url=url)
2057 2052
2058 2053 for i in xrange(cor + 1, cnr + 1):
2059 2054 self.hook("incoming", node=hex(self.changelog.node(i)),
2060 2055 source=srctype, url=url)
2061 2056
2062 2057 # never return 0 here:
2063 2058 if newheads < oldheads:
2064 2059 return newheads - oldheads - 1
2065 2060 else:
2066 2061 return newheads - oldheads + 1
2067 2062
2068 2063
2069 2064 def stream_in(self, remote):
2070 2065 fp = remote.stream_out()
2071 2066 l = fp.readline()
2072 2067 try:
2073 2068 resp = int(l)
2074 2069 except ValueError:
2075 2070 raise util.UnexpectedOutput(
2076 2071 _('Unexpected response from remote server:'), l)
2077 2072 if resp == 1:
2078 2073 raise util.Abort(_('operation forbidden by server'))
2079 2074 elif resp == 2:
2080 2075 raise util.Abort(_('locking the remote repository failed'))
2081 2076 elif resp != 0:
2082 2077 raise util.Abort(_('the server sent an unknown error code'))
2083 2078 self.ui.status(_('streaming all changes\n'))
2084 2079 l = fp.readline()
2085 2080 try:
2086 2081 total_files, total_bytes = map(int, l.split(' ', 1))
2087 2082 except (ValueError, TypeError):
2088 2083 raise util.UnexpectedOutput(
2089 2084 _('Unexpected response from remote server:'), l)
2090 2085 self.ui.status(_('%d files to transfer, %s of data\n') %
2091 2086 (total_files, util.bytecount(total_bytes)))
2092 2087 start = time.time()
2093 2088 for i in xrange(total_files):
2094 2089 # XXX doesn't support '\n' or '\r' in filenames
2095 2090 l = fp.readline()
2096 2091 try:
2097 2092 name, size = l.split('\0', 1)
2098 2093 size = int(size)
2099 2094 except (ValueError, TypeError):
2100 2095 raise util.UnexpectedOutput(
2101 2096 _('Unexpected response from remote server:'), l)
2102 2097 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2103 2098 ofp = self.sopener(name, 'w')
2104 2099 for chunk in util.filechunkiter(fp, limit=size):
2105 2100 ofp.write(chunk)
2106 2101 ofp.close()
2107 2102 elapsed = time.time() - start
2108 2103 if elapsed <= 0:
2109 2104 elapsed = 0.001
2110 2105 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2111 2106 (util.bytecount(total_bytes), elapsed,
2112 2107 util.bytecount(total_bytes / elapsed)))
2113 2108 self.invalidate()
2114 2109 return len(self.heads()) + 1
2115 2110
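The wire format parsed above is line oriented: a numeric status line (0 for OK, 1 forbidden, 2 lock failure), a '<files> <bytes>' summary line, then for each file a '<name>\0<size>' line followed by exactly size bytes of raw store data. A sketch of the sending side under those assumptions (not the actual server implementation):

    def stream_out_sketch(entries, fp):
        # entries: hypothetical list of (name, size, chunk_iterator) tuples.
        fp.write('0\n')  # status: OK
        total = sum(size for name, size, chunks in entries)
        fp.write('%d %d\n' % (len(entries), total))
        for name, size, chunks in entries:
            # note: this framing cannot express '\n' or '\r' in names
            fp.write('%s\0%d\n' % (name, size))
            for chunk in chunks:
                fp.write(chunk)
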
2116 2111 def clone(self, remote, heads=[], stream=False):
2117 2112 '''clone remote repository.
2118 2113
2119 2114 keyword arguments:
2120 2115 heads: list of revs to clone (forces use of pull)
2121 2116 stream: use streaming clone if possible'''
2122 2117
2123 2118 # now, all clients that can request uncompressed clones can
2124 2119 # read repo formats supported by all servers that can serve
2125 2120 # them.
2126 2121
2127 2122 # if revlog format changes, client will have to check version
2128 2123 # and format flags on "stream" capability, and use
2129 2124 # uncompressed only if compatible.
2130 2125
2131 2126 if stream and not heads and remote.capable('stream'):
2132 2127 return self.stream_in(remote)
2133 2128 return self.pull(remote, heads)
2134 2129
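As a usage note: the streaming path copies raw store files and is much faster, but it transfers everything, so it is only taken when no head subset was requested and the server advertises the 'stream' capability. A hypothetical call:

    # Prefer a streaming clone; silently falls back to pull if the
    # server lacks the capability or specific heads were asked for.
    repo.clone(remote, heads=[], stream=True)
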
2135 2130 # used to avoid circular references so destructors work
2136 2131 def aftertrans(files):
2137 2132 renamefiles = [tuple(t) for t in files]
2138 2133 def a():
2139 2134 for src, dest in renamefiles:
2140 2135 util.rename(src, dest)
2141 2136 return a
2142 2137
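A sketch of the intended use, with hypothetical file names: the repository's transaction setup can pass such a closure as the transaction's on-close hook, so the journal becomes the undo data once the transaction commits. Because the closure captures only the (src, dest) strings, the transaction never keeps the repository alive through a reference cycle.

    onclose = aftertrans([('journal', 'undo')])
    onclose()   # renames journal -> undo via util.rename
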
2143 2138 def instance(ui, path, create):
2144 2139 return localrepository(ui, util.drop_scheme('file', path), create)
2145 2140
2146 2141 def islocal(path):
2147 2142 return True
@@ -1,226 +1,225 b''
1 1 adding a
2 2 changeset: 0:8580ff50825a
3 3 user: test
4 4 date: Thu Jan 01 00:00:01 1970 +0000
5 5 summary: a
6 6
7 7 % -f, directory
8 8 abort: can only follow copies/renames for explicit file names
9 9 % -f, but no args
10 10 changeset: 4:b30c444c7c84
11 11 tag: tip
12 12 user: test
13 13 date: Thu Jan 01 00:00:05 1970 +0000
14 14 summary: e
15 15
16 16 changeset: 3:16b60bf3f99a
17 17 user: test
18 18 date: Thu Jan 01 00:00:04 1970 +0000
19 19 summary: d
20 20
21 21 changeset: 2:21fba396af4c
22 22 user: test
23 23 date: Thu Jan 01 00:00:03 1970 +0000
24 24 summary: c
25 25
26 26 changeset: 1:c0296dabce9b
27 27 user: test
28 28 date: Thu Jan 01 00:00:02 1970 +0000
29 29 summary: b
30 30
31 31 changeset: 0:8580ff50825a
32 32 user: test
33 33 date: Thu Jan 01 00:00:01 1970 +0000
34 34 summary: a
35 35
36 36 % one rename
37 37 changeset: 0:8580ff50825a
38 38 user: test
39 39 date: Thu Jan 01 00:00:01 1970 +0000
40 40 files: a
41 41 description:
42 42 a
43 43
44 44
45 45 % many renames
46 46 changeset: 4:b30c444c7c84
47 47 tag: tip
48 48 user: test
49 49 date: Thu Jan 01 00:00:05 1970 +0000
50 50 files: dir/b e
51 51 description:
52 52 e
53 53
54 54
55 55 changeset: 2:21fba396af4c
56 56 user: test
57 57 date: Thu Jan 01 00:00:03 1970 +0000
58 58 files: b dir/b
59 59 description:
60 60 c
61 61
62 62
63 63 changeset: 1:c0296dabce9b
64 64 user: test
65 65 date: Thu Jan 01 00:00:02 1970 +0000
66 66 files: b
67 67 description:
68 68 b
69 69
70 70
71 71 changeset: 0:8580ff50825a
72 72 user: test
73 73 date: Thu Jan 01 00:00:01 1970 +0000
74 74 files: a
75 75 description:
76 76 a
77 77
78 78
79 79 % log copies
80 80 4 e (dir/b)
81 81 3 b (a)
82 82 2 dir/b (b)
83 83 1 b (a)
84 84 0
85 85 % log copies, non-linear manifest
86 86 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
87 87 adding foo
88 88 created new head
89 89 5 e (dir/b)
90 90 % log copies, execute bit set
91 91 6
92 92 % log -p d
93 93 changeset: 3:16b60bf3f99a
94 94 user: test
95 95 date: Thu Jan 01 00:00:04 1970 +0000
96 96 files: a b d
97 97 description:
98 98 d
99 99
100 100
101 101 diff -r 21fba396af4c -r 16b60bf3f99a d
102 102 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
103 103 +++ b/d Thu Jan 01 00:00:04 1970 +0000
104 104 @@ -0,0 +1,1 @@
105 105 +a
106 106
107 107 adding base
108 108 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 109 adding b1
110 110 created new head
111 111 % log -f
112 112 changeset: 3:e62f78d544b4
113 113 tag: tip
114 114 parent: 1:3d5bf5654eda
115 115 user: test
116 116 date: Thu Jan 01 00:00:01 1970 +0000
117 117 summary: b1
118 118
119 119 changeset: 1:3d5bf5654eda
120 120 user: test
121 121 date: Thu Jan 01 00:00:01 1970 +0000
122 122 summary: r1
123 123
124 124 changeset: 0:67e992f2c4f3
125 125 user: test
126 126 date: Thu Jan 01 00:00:01 1970 +0000
127 127 summary: base
128 128
129 129 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
130 130 adding b2
131 131 created new head
132 132 % log -f -r 1:tip
133 133 changeset: 1:3d5bf5654eda
134 134 user: test
135 135 date: Thu Jan 01 00:00:01 1970 +0000
136 136 summary: r1
137 137
138 138 changeset: 2:60c670bf5b30
139 139 user: test
140 140 date: Thu Jan 01 00:00:01 1970 +0000
141 141 summary: r2
142 142
143 143 changeset: 3:e62f78d544b4
144 144 parent: 1:3d5bf5654eda
145 145 user: test
146 146 date: Thu Jan 01 00:00:01 1970 +0000
147 147 summary: b1
148 148
149 149 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
150 150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 151 (branch merge, don't forget to commit)
152 152 % log -r . with two parents
153 warning: working directory has two parents, tag '.' uses the first
154 153 changeset: 3:e62f78d544b4
155 154 parent: 1:3d5bf5654eda
156 155 user: test
157 156 date: Thu Jan 01 00:00:01 1970 +0000
158 157 summary: b1
159 158
160 159 % log -r . with one parent
161 160 changeset: 5:302e9dd6890d
162 161 tag: tip
163 162 parent: 3:e62f78d544b4
164 163 parent: 4:ddb82e70d1a1
165 164 user: test
166 165 date: Thu Jan 01 00:00:01 1970 +0000
167 166 summary: m12
168 167
169 168 % log --follow-first
170 169 changeset: 6:2404bbcab562
171 170 tag: tip
172 171 user: test
173 172 date: Thu Jan 01 00:00:01 1970 +0000
174 173 summary: b1.1
175 174
176 175 changeset: 5:302e9dd6890d
177 176 parent: 3:e62f78d544b4
178 177 parent: 4:ddb82e70d1a1
179 178 user: test
180 179 date: Thu Jan 01 00:00:01 1970 +0000
181 180 summary: m12
182 181
183 182 changeset: 3:e62f78d544b4
184 183 parent: 1:3d5bf5654eda
185 184 user: test
186 185 date: Thu Jan 01 00:00:01 1970 +0000
187 186 summary: b1
188 187
189 188 changeset: 1:3d5bf5654eda
190 189 user: test
191 190 date: Thu Jan 01 00:00:01 1970 +0000
192 191 summary: r1
193 192
194 193 changeset: 0:67e992f2c4f3
195 194 user: test
196 195 date: Thu Jan 01 00:00:01 1970 +0000
197 196 summary: base
198 197
199 198 % log -P 2
200 199 changeset: 6:2404bbcab562
201 200 tag: tip
202 201 user: test
203 202 date: Thu Jan 01 00:00:01 1970 +0000
204 203 summary: b1.1
205 204
206 205 changeset: 5:302e9dd6890d
207 206 parent: 3:e62f78d544b4
208 207 parent: 4:ddb82e70d1a1
209 208 user: test
210 209 date: Thu Jan 01 00:00:01 1970 +0000
211 210 summary: m12
212 211
213 212 changeset: 4:ddb82e70d1a1
214 213 parent: 0:67e992f2c4f3
215 214 user: test
216 215 date: Thu Jan 01 00:00:01 1970 +0000
217 216 summary: b2
218 217
219 218 changeset: 3:e62f78d544b4
220 219 parent: 1:3d5bf5654eda
221 220 user: test
222 221 date: Thu Jan 01 00:00:01 1970 +0000
223 222 summary: b1
224 223
225 224 % log -r ""
226 225 abort: 00changelog.i@: ambiguous identifier!
@@ -1,36 +1,35 b''
1 1 created new head
2 2 merging foo1 and foo to foo1
3 3 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
4 4 (branch merge, don't forget to commit)
5 5 n 0 -2 bar
6 6 m 644 14 foo1
7 7 copy: foo -> foo1
8 8 M bar
9 9 M foo1
10 10 % removing foo1 and bar
11 11 r 0 -2 bar
12 12 r 0 -1 foo1
13 13 copy: foo -> foo1
14 14 R bar
15 15 R foo1
16 16 % readding foo1 and bar
17 17 adding bar
18 18 adding foo1
19 19 n 0 -2 bar
20 20 m 644 14 foo1
21 21 copy: foo -> foo1
22 22 M bar
23 23 M foo1
24 24 foo
25 25 % reverting foo1 and bar
26 warning: working directory has two parents, tag '.' uses the first
27 26 saving current version of bar as bar.orig
28 27 reverting bar
29 28 saving current version of foo1 as foo1.orig
30 29 reverting foo1
31 30 n 0 -2 bar
32 31 m 644 14 foo1
33 32 copy: foo -> foo1
34 33 M bar
35 34 M foo1
36 35 foo