tags: reverse and simplify head-walking
Matt Mackall
r8852:a81652fc default
@@ -1,2178 +1,2177 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 from lock import release
17 17 import weakref, stat, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19
20 20 class localrepository(repo.repository):
21 21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 22 supported = set('revlogv1 store fncache shared'.split())
23 23
24 24 def __init__(self, baseui, path=None, create=0):
25 25 repo.repository.__init__(self)
26 26 self.root = os.path.realpath(path)
27 27 self.path = os.path.join(self.root, ".hg")
28 28 self.origroot = path
29 29 self.opener = util.opener(self.path)
30 30 self.wopener = util.opener(self.root)
31 31 self.baseui = baseui
32 32 self.ui = baseui.copy()
33 33
34 34 try:
35 35 self.ui.readconfig(self.join("hgrc"), self.root)
36 36 extensions.loadall(self.ui)
37 37 except IOError:
38 38 pass
39 39
40 40 if not os.path.isdir(self.path):
41 41 if create:
42 42 if not os.path.exists(path):
43 43 os.mkdir(path)
44 44 os.mkdir(self.path)
45 45 requirements = ["revlogv1"]
46 46 if self.ui.configbool('format', 'usestore', True):
47 47 os.mkdir(os.path.join(self.path, "store"))
48 48 requirements.append("store")
49 49 if self.ui.configbool('format', 'usefncache', True):
50 50 requirements.append("fncache")
51 51 # create an invalid changelog
52 52 self.opener("00changelog.i", "a").write(
53 53 '\0\0\0\2' # represents revlogv2
54 54 ' dummy changelog to prevent using the old repo layout'
55 55 )
56 56 reqfile = self.opener("requires", "w")
57 57 for r in requirements:
58 58 reqfile.write("%s\n" % r)
59 59 reqfile.close()
60 60 else:
61 61 raise error.RepoError(_("repository %s not found") % path)
62 62 elif create:
63 63 raise error.RepoError(_("repository %s already exists") % path)
64 64 else:
65 65 # find requirements
66 66 requirements = set()
67 67 try:
68 68 requirements = set(self.opener("requires").read().splitlines())
69 69 except IOError, inst:
70 70 if inst.errno != errno.ENOENT:
71 71 raise
72 72 for r in requirements - self.supported:
73 73 raise error.RepoError(_("requirement '%s' not supported") % r)
74 74
75 75 self.sharedpath = self.path
76 76 try:
77 77 s = os.path.realpath(self.opener("sharedpath").read())
78 78 if not os.path.exists(s):
79 79 raise error.RepoError(
80 80                     _('.hg/sharedpath points to nonexistent directory %s') % s)
81 81 self.sharedpath = s
82 82 except IOError, inst:
83 83 if inst.errno != errno.ENOENT:
84 84 raise
85 85
86 86 self.store = store.store(requirements, self.sharedpath, util.opener)
87 87 self.spath = self.store.path
88 88 self.sopener = self.store.opener
89 89 self.sjoin = self.store.join
90 90 self.opener.createmode = self.store.createmode
91 91
92 92 self.tagscache = None
93 93 self._tagstypecache = None
94 94 self.branchcache = None
95 95 self._ubranchcache = None # UTF-8 version of branchcache
96 96 self._branchcachetip = None
97 97 self.nodetagscache = None
98 98 self.filterpats = {}
99 99 self._datafilters = {}
100 100 self._transref = self._lockref = self._wlockref = None
101 101
102 102 @propertycache
103 103 def changelog(self):
104 104 c = changelog.changelog(self.sopener)
105 105 if 'HG_PENDING' in os.environ:
106 106 p = os.environ['HG_PENDING']
107 107 if p.startswith(self.root):
108 108 c.readpending('00changelog.i.a')
109 109 self.sopener.defversion = c.version
110 110 return c
111 111
112 112 @propertycache
113 113 def manifest(self):
114 114 return manifest.manifest(self.sopener)
115 115
116 116 @propertycache
117 117 def dirstate(self):
118 118 return dirstate.dirstate(self.opener, self.ui, self.root)
119 119
120 120 def __getitem__(self, changeid):
121 121 if changeid is None:
122 122 return context.workingctx(self)
123 123 return context.changectx(self, changeid)
124 124
125 125 def __nonzero__(self):
126 126 return True
127 127
128 128 def __len__(self):
129 129 return len(self.changelog)
130 130
131 131 def __iter__(self):
132 132 for i in xrange(len(self)):
133 133 yield i
134 134
135 135 def url(self):
136 136 return 'file:' + self.root
137 137
138 138 def hook(self, name, throw=False, **args):
139 139 return hook.hook(self.ui, self, name, throw, **args)
140 140
141 141 tag_disallowed = ':\r\n'
142 142
143 143 def _tag(self, names, node, message, local, user, date, extra={}):
144 144 if isinstance(names, str):
145 145 allchars = names
146 146 names = (names,)
147 147 else:
148 148 allchars = ''.join(names)
149 149 for c in self.tag_disallowed:
150 150 if c in allchars:
151 151 raise util.Abort(_('%r cannot be used in a tag name') % c)
152 152
153 153 for name in names:
154 154 self.hook('pretag', throw=True, node=hex(node), tag=name,
155 155 local=local)
156 156
157 157 def writetags(fp, names, munge, prevtags):
158 158 fp.seek(0, 2)
159 159 if prevtags and prevtags[-1] != '\n':
160 160 fp.write('\n')
161 161 for name in names:
162 162 m = munge and munge(name) or name
163 163 if self._tagstypecache and name in self._tagstypecache:
164 164 old = self.tagscache.get(name, nullid)
165 165 fp.write('%s %s\n' % (hex(old), m))
166 166 fp.write('%s %s\n' % (hex(node), m))
167 167 fp.close()
168 168
169 169 prevtags = ''
170 170 if local:
171 171 try:
172 172 fp = self.opener('localtags', 'r+')
173 173 except IOError:
174 174 fp = self.opener('localtags', 'a')
175 175 else:
176 176 prevtags = fp.read()
177 177
178 178 # local tags are stored in the current charset
179 179 writetags(fp, names, None, prevtags)
180 180 for name in names:
181 181 self.hook('tag', node=hex(node), tag=name, local=local)
182 182 return
183 183
184 184 try:
185 185 fp = self.wfile('.hgtags', 'rb+')
186 186 except IOError:
187 187 fp = self.wfile('.hgtags', 'ab')
188 188 else:
189 189 prevtags = fp.read()
190 190
191 191 # committed tags are stored in UTF-8
192 192 writetags(fp, names, encoding.fromlocal, prevtags)
193 193
194 194 if '.hgtags' not in self.dirstate:
195 195 self.add(['.hgtags'])
196 196
197 197 m = match_.exact(self.root, '', ['.hgtags'])
198 198 tagnode = self.commit(message, user, date, extra=extra, match=m)
199 199
200 200 for name in names:
201 201 self.hook('tag', node=hex(node), tag=name, local=local)
202 202
203 203 return tagnode
204 204
205 205 def tag(self, names, node, message, local, user, date):
206 206 '''tag a revision with one or more symbolic names.
207 207
208 208 names is a list of strings or, when adding a single tag, names may be a
209 209 string.
210 210
211 211 if local is True, the tags are stored in a per-repository file.
212 212 otherwise, they are stored in the .hgtags file, and a new
213 213 changeset is committed with the change.
214 214
215 215 keyword arguments:
216 216
217 217 local: whether to store tags in non-version-controlled file
218 218 (default False)
219 219
220 220 message: commit message to use if committing
221 221
222 222 user: name of user to use if committing
223 223
224 224 date: date tuple to use if committing'''
225 225
226 226 for x in self.status()[:5]:
227 227 if '.hgtags' in x:
228 228 raise util.Abort(_('working copy of .hgtags is changed '
229 229 '(please commit .hgtags manually)'))
230 230
231 231 self.tags() # instantiate the cache
232 232 self._tag(names, node, message, local, user, date)
233 233
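# An illustrative usage sketch (invented names and message, not part of
# localrepo.py): tagging the tip once globally and once locally.
#
#     node = repo.changelog.tip()
#     repo.tag('v1.0', node, 'Added tag v1.0 for changeset %s' % short(node),
#              local=False, user='user@example.com', date=None)
#     repo.tag(['nightly'], node, '', local=True, user=None, date=None)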
234 234 def tags(self):
235 235 '''return a mapping of tag to node'''
236 236 if self.tagscache:
237 237 return self.tagscache
238 238
239 239 globaltags = {}
240 240 tagtypes = {}
241 241
242 242 def readtags(lines, fn, tagtype):
243 243 filetags = {}
244 244 count = 0
245 245
246 246 def warn(msg):
247 247 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
248 248
249 249 for l in lines:
250 250 count += 1
251 251 if not l:
252 252 continue
253 253 s = l.split(" ", 1)
254 254 if len(s) != 2:
255 255 warn(_("cannot parse entry"))
256 256 continue
257 257 node, key = s
258 258 key = encoding.tolocal(key.strip()) # stored in UTF-8
259 259 try:
260 260 bin_n = bin(node)
261 261 except TypeError:
262 262 warn(_("node '%s' is not well formed") % node)
263 263 continue
264 264 if bin_n not in self.changelog.nodemap:
265 265 warn(_("tag '%s' refers to unknown node") % key)
266 266 continue
267 267
268 268 h = []
269 269 if key in filetags:
270 270 n, h = filetags[key]
271 271 h.append(n)
272 272 filetags[key] = (bin_n, h)
273 273
274 274 for k, nh in filetags.iteritems():
275 275 if k not in globaltags:
276 276 globaltags[k] = nh
277 277 tagtypes[k] = tagtype
278 278 continue
279 279
280 280                 # we prefer the global tag if:
281 281                 #  it supersedes us OR
282 282                 #  we mutually supersede each other and it has a higher rank
283 283                 # otherwise we win because we're tip-most
284 284 an, ah = nh
285 285 bn, bh = globaltags[k]
286 286 if (bn != an and an in bh and
287 287 (bn not in ah or len(bh) > len(ah))):
288 288 an = bn
289 289 ah.extend([n for n in bh if n not in ah])
290 290 globaltags[k] = an, ah
291 291 tagtypes[k] = tagtype
292 292
293 293 def tagnodes():
294 last = {}
294 seen = set()
295 295 ret = []
296 for node in reversed(self.heads()):
296 for node in self.heads():
297 297 c = self[node]
298 298 try:
299 299 fnode = c.filenode('.hgtags')
300 300 except error.LookupError:
301 301 continue
302 ret.append((node, fnode))
303 if fnode in last:
304 ret[last[fnode]] = None
305 last[fnode] = len(ret) - 1
306 return [item for item in ret if item]
302 if fnode not in seen:
303 ret.append((node, fnode))
304 seen.add(fnode)
305 return reversed(ret)
307 306
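# Illustration of the rewrite above, with invented data.  self.heads()
# returns heads newest-first (see heads() below, which sorts by
# descending revision), so keeping only the first occurrence of each
# .hgtags filenode and reversing the result reproduces the old
# oldest-to-newest order without the last/tombstone bookkeeping:
#
#     heads, newest first:  [(n3, fA), (n2, fB), (n1, fA)]
#     kept on first sight:  [(n3, fA), (n2, fB)]
#     reversed(ret):        [(n2, fB), (n3, fA)]   # tip-most fA read last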
308 307 # read the tags file from each head, ending with the tip
309 308 f = None
310 309 for node, fnode in tagnodes():
311 310 f = (f and f.filectx(fnode) or
312 311 self.filectx('.hgtags', fileid=fnode))
313 312 readtags(f.data().splitlines(), f, "global")
314 313
315 314 try:
316 315 data = encoding.fromlocal(self.opener("localtags").read())
317 316 # localtags are stored in the local character set
318 317 # while the internal tag table is stored in UTF-8
319 318 readtags(data.splitlines(), "localtags", "local")
320 319 except IOError:
321 320 pass
322 321
323 322 self.tagscache = {}
324 323 self._tagstypecache = {}
325 324 for k, nh in globaltags.iteritems():
326 325 n = nh[0]
327 326 if n != nullid:
328 327 self.tagscache[k] = n
329 328 self._tagstypecache[k] = tagtypes[k]
330 329 self.tagscache['tip'] = self.changelog.tip()
331 330 return self.tagscache
332 331
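# A worked example of the preference rule in readtags (invented nodes):
# head A records tag t -> n1 with history [n0]; head B records t -> n2
# with history [n0, n1].  B's entry supersedes A's (n1 is in B's
# history, n2 is not in A's), so n2 wins regardless of which head is
# read first; only when neither entry supersedes the other does the
# tip-most reader win.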
333 332 def tagtype(self, tagname):
334 333 '''
335 334 return the type of the given tag. result can be:
336 335
337 336 'local' : a local tag
338 337 'global' : a global tag
339 338 None : tag does not exist
340 339 '''
341 340
342 341 self.tags()
343 342
344 343 return self._tagstypecache.get(tagname)
345 344
346 345 def tagslist(self):
347 346 '''return a list of tags ordered by revision'''
348 347 l = []
349 348 for t, n in self.tags().iteritems():
350 349 try:
351 350 r = self.changelog.rev(n)
352 351 except:
353 352 r = -2 # sort to the beginning of the list if unknown
354 353 l.append((r, t, n))
355 354 return [(t, n) for r, t, n in sorted(l)]
356 355
357 356 def nodetags(self, node):
358 357 '''return the tags associated with a node'''
359 358 if not self.nodetagscache:
360 359 self.nodetagscache = {}
361 360 for t, n in self.tags().iteritems():
362 361 self.nodetagscache.setdefault(n, []).append(t)
363 362 return self.nodetagscache.get(node, [])
364 363
365 364 def _branchtags(self, partial, lrev):
366 365 # TODO: rename this function?
367 366 tiprev = len(self) - 1
368 367 if lrev != tiprev:
369 368 self._updatebranchcache(partial, lrev+1, tiprev+1)
370 369 self._writebranchcache(partial, self.changelog.tip(), tiprev)
371 370
372 371 return partial
373 372
374 373 def branchmap(self):
375 374 tip = self.changelog.tip()
376 375 if self.branchcache is not None and self._branchcachetip == tip:
377 376 return self.branchcache
378 377
379 378 oldtip = self._branchcachetip
380 379 self._branchcachetip = tip
381 380 if self.branchcache is None:
382 381 self.branchcache = {} # avoid recursion in changectx
383 382 else:
384 383 self.branchcache.clear() # keep using the same dict
385 384 if oldtip is None or oldtip not in self.changelog.nodemap:
386 385 partial, last, lrev = self._readbranchcache()
387 386 else:
388 387 lrev = self.changelog.rev(oldtip)
389 388 partial = self._ubranchcache
390 389
391 390 self._branchtags(partial, lrev)
392 391 # this private cache holds all heads (not just tips)
393 392 self._ubranchcache = partial
394 393
395 394 # the branch cache is stored on disk as UTF-8, but in the local
396 395 # charset internally
397 396 for k, v in partial.iteritems():
398 397 self.branchcache[encoding.tolocal(k)] = v
399 398 return self.branchcache
400 399
401 400
402 401 def branchtags(self):
403 402 '''return a dict where branch names map to the tipmost head of
404 403 the branch, open heads come before closed'''
405 404 bt = {}
406 405 for bn, heads in self.branchmap().iteritems():
407 406 head = None
408 407 for i in range(len(heads)-1, -1, -1):
409 408 h = heads[i]
410 409 if 'close' not in self.changelog.read(h)[5]:
411 410 head = h
412 411 break
413 412 # no open heads were found
414 413 if head is None:
415 414 head = heads[-1]
416 415 bt[bn] = head
417 416 return bt
418 417
419 418
420 419 def _readbranchcache(self):
421 420 partial = {}
422 421 try:
423 422 f = self.opener("branchheads.cache")
424 423 lines = f.read().split('\n')
425 424 f.close()
426 425 except (IOError, OSError):
427 426 return {}, nullid, nullrev
428 427
429 428 try:
430 429 last, lrev = lines.pop(0).split(" ", 1)
431 430 last, lrev = bin(last), int(lrev)
432 431 if lrev >= len(self) or self[lrev].node() != last:
433 432 # invalidate the cache
434 433 raise ValueError('invalidating branch cache (tip differs)')
435 434 for l in lines:
436 435 if not l: continue
437 436 node, label = l.split(" ", 1)
438 437 partial.setdefault(label.strip(), []).append(bin(node))
439 438 except KeyboardInterrupt:
440 439 raise
441 440 except Exception, inst:
442 441 if self.ui.debugflag:
443 442 self.ui.warn(str(inst), '\n')
444 443 partial, last, lrev = {}, nullid, nullrev
445 444 return partial, last, lrev
446 445
447 446 def _writebranchcache(self, branches, tip, tiprev):
448 447 try:
449 448 f = self.opener("branchheads.cache", "w", atomictemp=True)
450 449 f.write("%s %s\n" % (hex(tip), tiprev))
451 450 for label, nodes in branches.iteritems():
452 451 for node in nodes:
453 452 f.write("%s %s\n" % (hex(node), label))
454 453 f.rename()
455 454 except (IOError, OSError):
456 455 pass
457 456
458 457 def _updatebranchcache(self, partial, start, end):
459 458 for r in xrange(start, end):
460 459 c = self[r]
461 460 b = c.branch()
462 461 bheads = partial.setdefault(b, [])
463 462 bheads.append(c.node())
464 463 for p in c.parents():
465 464 pn = p.node()
466 465 if pn in bheads:
467 466 bheads.remove(pn)
468 467
469 468 def lookup(self, key):
470 469 if isinstance(key, int):
471 470 return self.changelog.node(key)
472 471 elif key == '.':
473 472 return self.dirstate.parents()[0]
474 473 elif key == 'null':
475 474 return nullid
476 475 elif key == 'tip':
477 476 return self.changelog.tip()
478 477 n = self.changelog._match(key)
479 478 if n:
480 479 return n
481 480 if key in self.tags():
482 481 return self.tags()[key]
483 482 if key in self.branchtags():
484 483 return self.branchtags()[key]
485 484 n = self.changelog._partialmatch(key)
486 485 if n:
487 486 return n
488 487
489 488 # can't find key, check if it might have come from damaged dirstate
490 489 if key in self.dirstate.parents():
491 490 raise error.Abort(_("working directory has unknown parent '%s'!")
492 491 % short(key))
493 492 try:
494 493 if len(key) == 20:
495 494 key = hex(key)
496 495 except:
497 496 pass
498 497 raise error.RepoError(_("unknown revision '%s'") % key)
499 498
500 499 def local(self):
501 500 return True
502 501
503 502 def join(self, f):
504 503 return os.path.join(self.path, f)
505 504
506 505 def wjoin(self, f):
507 506 return os.path.join(self.root, f)
508 507
509 508 def rjoin(self, f):
510 509 return os.path.join(self.root, util.pconvert(f))
511 510
512 511 def file(self, f):
513 512 if f[0] == '/':
514 513 f = f[1:]
515 514 return filelog.filelog(self.sopener, f)
516 515
517 516 def changectx(self, changeid):
518 517 return self[changeid]
519 518
520 519 def parents(self, changeid=None):
521 520 '''get list of changectxs for parents of changeid'''
522 521 return self[changeid].parents()
523 522
524 523 def filectx(self, path, changeid=None, fileid=None):
525 524 """changeid can be a changeset revision, node, or tag.
526 525 fileid can be a file revision or node."""
527 526 return context.filectx(self, path, changeid, fileid)
528 527
529 528 def getcwd(self):
530 529 return self.dirstate.getcwd()
531 530
532 531 def pathto(self, f, cwd=None):
533 532 return self.dirstate.pathto(f, cwd)
534 533
535 534 def wfile(self, f, mode='r'):
536 535 return self.wopener(f, mode)
537 536
538 537 def _link(self, f):
539 538 return os.path.islink(self.wjoin(f))
540 539
541 540 def _filter(self, filter, filename, data):
542 541 if filter not in self.filterpats:
543 542 l = []
544 543 for pat, cmd in self.ui.configitems(filter):
545 544 if cmd == '!':
546 545 continue
547 546 mf = match_.match(self.root, '', [pat])
548 547 fn = None
549 548 params = cmd
550 549 for name, filterfn in self._datafilters.iteritems():
551 550 if cmd.startswith(name):
552 551 fn = filterfn
553 552 params = cmd[len(name):].lstrip()
554 553 break
555 554 if not fn:
556 555 fn = lambda s, c, **kwargs: util.filter(s, c)
557 556 # Wrap old filters not supporting keyword arguments
558 557 if not inspect.getargspec(fn)[2]:
559 558 oldfn = fn
560 559 fn = lambda s, c, **kwargs: oldfn(s, c)
561 560 l.append((mf, fn, params))
562 561 self.filterpats[filter] = l
563 562
564 563 for mf, fn, cmd in self.filterpats[filter]:
565 564 if mf(filename):
566 565 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
567 566 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
568 567 break
569 568
570 569 return data
571 570
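# _filter is driven by the [encode]/[decode] hgrc sections; a
# hypothetical stanza such as
#
#     [encode]
#     **.txt = unix2dos
#
# pipes matching filenames through util.filter with that command,
# unless the command starts with the name of a filter registered via
# adddatafilter(), in which case that Python filter is called instead.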
572 571 def adddatafilter(self, name, filter):
573 572 self._datafilters[name] = filter
574 573
575 574 def wread(self, filename):
576 575 if self._link(filename):
577 576 data = os.readlink(self.wjoin(filename))
578 577 else:
579 578 data = self.wopener(filename, 'r').read()
580 579 return self._filter("encode", filename, data)
581 580
582 581 def wwrite(self, filename, data, flags):
583 582 data = self._filter("decode", filename, data)
584 583 try:
585 584 os.unlink(self.wjoin(filename))
586 585 except OSError:
587 586 pass
588 587 if 'l' in flags:
589 588 self.wopener.symlink(data, filename)
590 589 else:
591 590 self.wopener(filename, 'w').write(data)
592 591 if 'x' in flags:
593 592 util.set_flags(self.wjoin(filename), False, True)
594 593
595 594 def wwritedata(self, filename, data):
596 595 return self._filter("decode", filename, data)
597 596
598 597 def transaction(self):
599 598 tr = self._transref and self._transref() or None
600 599 if tr and tr.running():
601 600 return tr.nest()
602 601
603 602 # abort here if the journal already exists
604 603 if os.path.exists(self.sjoin("journal")):
605 604 raise error.RepoError(_("journal already exists - run hg recover"))
606 605
607 606 # save dirstate for rollback
608 607 try:
609 608 ds = self.opener("dirstate").read()
610 609 except IOError:
611 610 ds = ""
612 611 self.opener("journal.dirstate", "w").write(ds)
613 612 self.opener("journal.branch", "w").write(self.dirstate.branch())
614 613
615 614 renames = [(self.sjoin("journal"), self.sjoin("undo")),
616 615 (self.join("journal.dirstate"), self.join("undo.dirstate")),
617 616 (self.join("journal.branch"), self.join("undo.branch"))]
618 617 tr = transaction.transaction(self.ui.warn, self.sopener,
619 618 self.sjoin("journal"),
620 619 aftertrans(renames),
621 620 self.store.createmode)
622 621 self._transref = weakref.ref(tr)
623 622 return tr
624 623
625 624 def recover(self):
626 625 lock = self.lock()
627 626 try:
628 627 if os.path.exists(self.sjoin("journal")):
629 628 self.ui.status(_("rolling back interrupted transaction\n"))
630 629 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
631 630 self.invalidate()
632 631 return True
633 632 else:
634 633 self.ui.warn(_("no interrupted transaction available\n"))
635 634 return False
636 635 finally:
637 636 lock.release()
638 637
639 638 def rollback(self):
640 639 wlock = lock = None
641 640 try:
642 641 wlock = self.wlock()
643 642 lock = self.lock()
644 643 if os.path.exists(self.sjoin("undo")):
645 644 self.ui.status(_("rolling back last transaction\n"))
646 645 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
647 646 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
648 647 try:
649 648 branch = self.opener("undo.branch").read()
650 649 self.dirstate.setbranch(branch)
651 650 except IOError:
652 651 self.ui.warn(_("Named branch could not be reset, "
653 652 "current branch still is: %s\n")
654 653 % encoding.tolocal(self.dirstate.branch()))
655 654 self.invalidate()
656 655 self.dirstate.invalidate()
657 656 else:
658 657 self.ui.warn(_("no rollback information available\n"))
659 658 finally:
660 659 release(lock, wlock)
661 660
662 661 def invalidate(self):
663 662 for a in "changelog manifest".split():
664 663 if a in self.__dict__:
665 664 delattr(self, a)
666 665 self.tagscache = None
667 666 self._tagstypecache = None
668 667 self.nodetagscache = None
669 668 self.branchcache = None
670 669 self._ubranchcache = None
671 670 self._branchcachetip = None
672 671
673 672 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
674 673 try:
675 674 l = lock.lock(lockname, 0, releasefn, desc=desc)
676 675 except error.LockHeld, inst:
677 676 if not wait:
678 677 raise
679 678 self.ui.warn(_("waiting for lock on %s held by %r\n") %
680 679 (desc, inst.locker))
681 680 # default to 600 seconds timeout
682 681 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
683 682 releasefn, desc=desc)
684 683 if acquirefn:
685 684 acquirefn()
686 685 return l
687 686
688 687 def lock(self, wait=True):
689 688 l = self._lockref and self._lockref()
690 689 if l is not None and l.held:
691 690 l.lock()
692 691 return l
693 692
694 693 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
695 694 _('repository %s') % self.origroot)
696 695 self._lockref = weakref.ref(l)
697 696 return l
698 697
699 698 def wlock(self, wait=True):
700 699 l = self._wlockref and self._wlockref()
701 700 if l is not None and l.held:
702 701 l.lock()
703 702 return l
704 703
705 704 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
706 705 self.dirstate.invalidate, _('working directory of %s') %
707 706 self.origroot)
708 707 self._wlockref = weakref.ref(l)
709 708 return l
710 709
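# Lock-ordering note: callers that need both locks take wlock before
# lock (rollback() above does, and commit() below takes wlock while
# commitctx() then takes lock); acquiring them in the opposite order
# can deadlock against such callers.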
711 710 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
712 711 """
713 712 commit an individual file as part of a larger transaction
714 713 """
715 714
716 715 fname = fctx.path()
717 716 text = fctx.data()
718 717 flog = self.file(fname)
719 718 fparent1 = manifest1.get(fname, nullid)
720 719 fparent2 = fparent2o = manifest2.get(fname, nullid)
721 720
722 721 meta = {}
723 722 copy = fctx.renamed()
724 723 if copy and copy[0] != fname:
725 724 # Mark the new revision of this file as a copy of another
726 725 # file. This copy data will effectively act as a parent
727 726 # of this new revision. If this is a merge, the first
728 727 # parent will be the nullid (meaning "look up the copy data")
729 728 # and the second one will be the other parent. For example:
730 729 #
731 730 # 0 --- 1 --- 3 rev1 changes file foo
732 731 # \ / rev2 renames foo to bar and changes it
733 732 # \- 2 -/ rev3 should have bar with all changes and
734 733 # should record that bar descends from
735 734 # bar in rev2 and foo in rev1
736 735 #
737 736 # this allows this merge to succeed:
738 737 #
739 738 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
740 739 # \ / merging rev3 and rev4 should use bar@rev2
741 740 # \- 2 --- 4 as the merge base
742 741 #
743 742
744 743 cfname = copy[0]
745 744 crev = manifest1.get(cfname)
746 745 newfparent = fparent2
747 746
748 747 if manifest2: # branch merge
749 748 if fparent2 == nullid or crev is None: # copied on remote side
750 749 if cfname in manifest2:
751 750 crev = manifest2[cfname]
752 751 newfparent = fparent1
753 752
754 753 # find source in nearest ancestor if we've lost track
755 754 if not crev:
756 755 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
757 756 (fname, cfname))
758 757 for ancestor in self['.'].ancestors():
759 758 if cfname in ancestor:
760 759 crev = ancestor[cfname].filenode()
761 760 break
762 761
763 762 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
764 763 meta["copy"] = cfname
765 764 meta["copyrev"] = hex(crev)
766 765 fparent1, fparent2 = nullid, newfparent
767 766 elif fparent2 != nullid:
768 767 # is one parent an ancestor of the other?
769 768 fparentancestor = flog.ancestor(fparent1, fparent2)
770 769 if fparentancestor == fparent1:
771 770 fparent1, fparent2 = fparent2, nullid
772 771 elif fparentancestor == fparent2:
773 772 fparent2 = nullid
774 773
775 774 # is the file changed?
776 775 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
777 776 changelist.append(fname)
778 777 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
779 778
780 779 # are just the flags changed during merge?
781 780 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
782 781 changelist.append(fname)
783 782
784 783 return fparent1
785 784
786 785 def commit(self, text="", user=None, date=None, match=None, force=False,
787 786 editor=False, extra={}):
788 787 """Add a new revision to current repository.
789 788
790 789 Revision information is gathered from the working directory,
791 790 match can be used to filter the committed files. If editor is
792 791 supplied, it is called to get a commit message.
793 792 """
794 793
795 794 def fail(f, msg):
796 795 raise util.Abort('%s: %s' % (f, msg))
797 796
798 797 if not match:
799 798 match = match_.always(self.root, '')
800 799
801 800 if not force:
802 801 vdirs = []
803 802 match.dir = vdirs.append
804 803 match.bad = fail
805 804
806 805 wlock = self.wlock()
807 806 try:
808 807 p1, p2 = self.dirstate.parents()
809 808 wctx = self[None]
810 809
811 810 if (not force and p2 != nullid and match and
812 811 (match.files() or match.anypats())):
813 812 raise util.Abort(_('cannot partially commit a merge '
814 813 '(do not specify files or patterns)'))
815 814
816 815 changes = self.status(match=match, clean=force)
817 816 if force:
818 817 changes[0].extend(changes[6]) # mq may commit unchanged files
819 818
820 819 # check subrepos
821 820 subs = []
822 821 for s in wctx.substate:
823 822 if match(s) and wctx.sub(s).dirty():
824 823 subs.append(s)
825 824 if subs and '.hgsubstate' not in changes[0]:
826 825 changes[0].insert(0, '.hgsubstate')
827 826
828 827 # make sure all explicit patterns are matched
829 828 if not force and match.files():
830 829 matched = set(changes[0] + changes[1] + changes[2])
831 830
832 831 for f in match.files():
833 832 if f == '.' or f in matched or f in wctx.substate:
834 833 continue
835 834 if f in changes[3]: # missing
836 835 fail(f, _('file not found!'))
837 836 if f in vdirs: # visited directory
838 837 d = f + '/'
839 838 for mf in matched:
840 839 if mf.startswith(d):
841 840 break
842 841 else:
843 842 fail(f, _("no match under directory!"))
844 843 elif f not in self.dirstate:
845 844 fail(f, _("file not tracked!"))
846 845
847 846 if (not force and not extra.get("close") and p2 == nullid
848 847 and not (changes[0] or changes[1] or changes[2])
849 848 and self[None].branch() == self['.'].branch()):
850 849 self.ui.status(_("nothing changed\n"))
851 850 return None
852 851
853 852 ms = merge_.mergestate(self)
854 853 for f in changes[0]:
855 854 if f in ms and ms[f] == 'u':
856 855 raise util.Abort(_("unresolved merge conflicts "
857 856 "(see hg resolve)"))
858 857
859 858 cctx = context.workingctx(self, (p1, p2), text, user, date,
860 859 extra, changes)
861 860 if editor:
862 861 cctx._text = editor(self, cctx)
863 862
864 863 # commit subs
865 864 if subs:
866 865 state = wctx.substate.copy()
867 866 for s in subs:
868 867 self.ui.status(_('committing subrepository %s\n') % s)
869 868 sr = wctx.sub(s).commit(cctx._text, user, date)
870 869 state[s] = (state[s][0], sr)
871 870 subrepo.writestate(self, state)
872 871
873 872 ret = self.commitctx(cctx, True)
874 873
875 874 # update dirstate and mergestate
876 875 for f in changes[0] + changes[1]:
877 876 self.dirstate.normal(f)
878 877 for f in changes[2]:
879 878 self.dirstate.forget(f)
880 879 self.dirstate.setparents(ret)
881 880 ms.reset()
882 881
883 882 return ret
884 883
885 884 finally:
886 885 wlock.release()
887 886
888 887 def commitctx(self, ctx, error=False):
889 888 """Add a new revision to current repository.
890 889
891 890 Revision information is passed via the context argument.
892 891 """
893 892
894 893 tr = lock = None
895 894 removed = ctx.removed()
896 895 p1, p2 = ctx.p1(), ctx.p2()
897 896 m1 = p1.manifest().copy()
898 897 m2 = p2.manifest()
899 898 user = ctx.user()
900 899
901 900 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
902 901 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
903 902
904 903 lock = self.lock()
905 904 try:
906 905 tr = self.transaction()
907 906 trp = weakref.proxy(tr)
908 907
909 908 # check in files
910 909 new = {}
911 910 changed = []
912 911 linkrev = len(self)
913 912 for f in sorted(ctx.modified() + ctx.added()):
914 913 self.ui.note(f + "\n")
915 914 try:
916 915 fctx = ctx[f]
917 916 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
918 917 changed)
919 918 m1.set(f, fctx.flags())
920 919 except (OSError, IOError):
921 920 if error:
922 921 self.ui.warn(_("trouble committing %s!\n") % f)
923 922 raise
924 923 else:
925 924 removed.append(f)
926 925
927 926 # update manifest
928 927 m1.update(new)
929 928 removed = [f for f in sorted(removed) if f in m1 or f in m2]
930 929 drop = [f for f in removed if f in m1]
931 930 for f in drop:
932 931 del m1[f]
933 932 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
934 933 p2.manifestnode(), (new, drop))
935 934
936 935 # update changelog
937 936 self.changelog.delayupdate()
938 937 n = self.changelog.add(mn, changed + removed, ctx.description(),
939 938 trp, p1.node(), p2.node(),
940 939 user, ctx.date(), ctx.extra().copy())
941 940 p = lambda: self.changelog.writepending() and self.root or ""
942 941 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
943 942 parent2=xp2, pending=p)
944 943 self.changelog.finalize(trp)
945 944 tr.close()
946 945
947 946 if self.branchcache:
948 947 self.branchtags()
949 948
950 949 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
951 950 return n
952 951 finally:
953 952 del tr
954 953 lock.release()
955 954
956 955 def walk(self, match, node=None):
957 956 '''
958 957 walk recursively through the directory tree or a given
959 958 changeset, finding all files matched by the match
960 959 function
961 960 '''
962 961 return self[node].walk(match)
963 962
964 963 def status(self, node1='.', node2=None, match=None,
965 964 ignored=False, clean=False, unknown=False):
966 965 """return status of files between two nodes or node and working directory
967 966
968 967 If node1 is None, use the first dirstate parent instead.
969 968 If node2 is None, compare node1 with working directory.
970 969 """
971 970
972 971 def mfmatches(ctx):
973 972 mf = ctx.manifest().copy()
974 973 for fn in mf.keys():
975 974 if not match(fn):
976 975 del mf[fn]
977 976 return mf
978 977
979 978 if isinstance(node1, context.changectx):
980 979 ctx1 = node1
981 980 else:
982 981 ctx1 = self[node1]
983 982 if isinstance(node2, context.changectx):
984 983 ctx2 = node2
985 984 else:
986 985 ctx2 = self[node2]
987 986
988 987 working = ctx2.rev() is None
989 988 parentworking = working and ctx1 == self['.']
990 989 match = match or match_.always(self.root, self.getcwd())
991 990 listignored, listclean, listunknown = ignored, clean, unknown
992 991
993 992 # load earliest manifest first for caching reasons
994 993 if not working and ctx2.rev() < ctx1.rev():
995 994 ctx2.manifest()
996 995
997 996 if not parentworking:
998 997 def bad(f, msg):
999 998 if f not in ctx1:
1000 999 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1001 1000 match.bad = bad
1002 1001
1003 1002 if working: # we need to scan the working dir
1004 1003 s = self.dirstate.status(match, listignored, listclean, listunknown)
1005 1004 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1006 1005
1007 1006 # check for any possibly clean files
1008 1007 if parentworking and cmp:
1009 1008 fixup = []
1010 1009 # do a full compare of any files that might have changed
1011 1010 for f in sorted(cmp):
1012 1011 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1013 1012 or ctx1[f].cmp(ctx2[f].data())):
1014 1013 modified.append(f)
1015 1014 else:
1016 1015 fixup.append(f)
1017 1016
1018 1017 if listclean:
1019 1018 clean += fixup
1020 1019
1021 1020 # update dirstate for files that are actually clean
1022 1021 if fixup:
1023 1022 try:
1024 1023 # updating the dirstate is optional
1025 1024 # so we don't wait on the lock
1026 1025 wlock = self.wlock(False)
1027 1026 try:
1028 1027 for f in fixup:
1029 1028 self.dirstate.normal(f)
1030 1029 finally:
1031 1030 wlock.release()
1032 1031 except error.LockError:
1033 1032 pass
1034 1033
1035 1034 if not parentworking:
1036 1035 mf1 = mfmatches(ctx1)
1037 1036 if working:
1038 1037 # we are comparing working dir against non-parent
1039 1038 # generate a pseudo-manifest for the working dir
1040 1039 mf2 = mfmatches(self['.'])
1041 1040 for f in cmp + modified + added:
1042 1041 mf2[f] = None
1043 1042 mf2.set(f, ctx2.flags(f))
1044 1043 for f in removed:
1045 1044 if f in mf2:
1046 1045 del mf2[f]
1047 1046 else:
1048 1047 # we are comparing two revisions
1049 1048 deleted, unknown, ignored = [], [], []
1050 1049 mf2 = mfmatches(ctx2)
1051 1050
1052 1051 modified, added, clean = [], [], []
1053 1052 for fn in mf2:
1054 1053 if fn in mf1:
1055 1054 if (mf1.flags(fn) != mf2.flags(fn) or
1056 1055 (mf1[fn] != mf2[fn] and
1057 1056 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1058 1057 modified.append(fn)
1059 1058 elif listclean:
1060 1059 clean.append(fn)
1061 1060 del mf1[fn]
1062 1061 else:
1063 1062 added.append(fn)
1064 1063 removed = mf1.keys()
1065 1064
1066 1065 r = modified, added, removed, deleted, unknown, ignored, clean
1067 1066 [l.sort() for l in r]
1068 1067 return r
1069 1068
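# Illustrative unpacking of the 7-tuple returned above:
#
#     modified, added, removed, deleted, unknown, ignored, clean = \
#         repo.status(node1='.', node2=None, clean=True)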
1070 1069 def add(self, list):
1071 1070 wlock = self.wlock()
1072 1071 try:
1073 1072 rejected = []
1074 1073 for f in list:
1075 1074 p = self.wjoin(f)
1076 1075 try:
1077 1076 st = os.lstat(p)
1078 1077 except:
1079 1078 self.ui.warn(_("%s does not exist!\n") % f)
1080 1079 rejected.append(f)
1081 1080 continue
1082 1081 if st.st_size > 10000000:
1083 1082 self.ui.warn(_("%s: files over 10MB may cause memory and"
1084 1083 " performance problems\n"
1085 1084 "(use 'hg revert %s' to unadd the file)\n")
1086 1085 % (f, f))
1087 1086 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1088 1087 self.ui.warn(_("%s not added: only files and symlinks "
1089 1088 "supported currently\n") % f)
1090 1089 rejected.append(p)
1091 1090 elif self.dirstate[f] in 'amn':
1092 1091 self.ui.warn(_("%s already tracked!\n") % f)
1093 1092 elif self.dirstate[f] == 'r':
1094 1093 self.dirstate.normallookup(f)
1095 1094 else:
1096 1095 self.dirstate.add(f)
1097 1096 return rejected
1098 1097 finally:
1099 1098 wlock.release()
1100 1099
1101 1100 def forget(self, list):
1102 1101 wlock = self.wlock()
1103 1102 try:
1104 1103 for f in list:
1105 1104 if self.dirstate[f] != 'a':
1106 1105 self.ui.warn(_("%s not added!\n") % f)
1107 1106 else:
1108 1107 self.dirstate.forget(f)
1109 1108 finally:
1110 1109 wlock.release()
1111 1110
1112 1111 def remove(self, list, unlink=False):
1113 1112 if unlink:
1114 1113 for f in list:
1115 1114 try:
1116 1115 util.unlink(self.wjoin(f))
1117 1116 except OSError, inst:
1118 1117 if inst.errno != errno.ENOENT:
1119 1118 raise
1120 1119 wlock = self.wlock()
1121 1120 try:
1122 1121 for f in list:
1123 1122 if unlink and os.path.exists(self.wjoin(f)):
1124 1123 self.ui.warn(_("%s still exists!\n") % f)
1125 1124 elif self.dirstate[f] == 'a':
1126 1125 self.dirstate.forget(f)
1127 1126 elif f not in self.dirstate:
1128 1127 self.ui.warn(_("%s not tracked!\n") % f)
1129 1128 else:
1130 1129 self.dirstate.remove(f)
1131 1130 finally:
1132 1131 wlock.release()
1133 1132
1134 1133 def undelete(self, list):
1135 1134 manifests = [self.manifest.read(self.changelog.read(p)[0])
1136 1135 for p in self.dirstate.parents() if p != nullid]
1137 1136 wlock = self.wlock()
1138 1137 try:
1139 1138 for f in list:
1140 1139 if self.dirstate[f] != 'r':
1141 1140 self.ui.warn(_("%s not removed!\n") % f)
1142 1141 else:
1143 1142 m = f in manifests[0] and manifests[0] or manifests[1]
1144 1143 t = self.file(f).read(m[f])
1145 1144 self.wwrite(f, t, m.flags(f))
1146 1145 self.dirstate.normal(f)
1147 1146 finally:
1148 1147 wlock.release()
1149 1148
1150 1149 def copy(self, source, dest):
1151 1150 p = self.wjoin(dest)
1152 1151 if not (os.path.exists(p) or os.path.islink(p)):
1153 1152 self.ui.warn(_("%s does not exist!\n") % dest)
1154 1153 elif not (os.path.isfile(p) or os.path.islink(p)):
1155 1154 self.ui.warn(_("copy failed: %s is not a file or a "
1156 1155 "symbolic link\n") % dest)
1157 1156 else:
1158 1157 wlock = self.wlock()
1159 1158 try:
1160 1159 if self.dirstate[dest] in '?r':
1161 1160 self.dirstate.add(dest)
1162 1161 self.dirstate.copy(source, dest)
1163 1162 finally:
1164 1163 wlock.release()
1165 1164
1166 1165 def heads(self, start=None):
1167 1166 heads = self.changelog.heads(start)
1168 1167 # sort the output in rev descending order
1169 1168 heads = [(-self.changelog.rev(h), h) for h in heads]
1170 1169 return [n for (r, n) in sorted(heads)]
1171 1170
1172 1171 def branchheads(self, branch=None, start=None, closed=False):
1173 1172 if branch is None:
1174 1173 branch = self[None].branch()
1175 1174 branches = self.branchmap()
1176 1175 if branch not in branches:
1177 1176 return []
1178 1177 bheads = branches[branch]
1179 1178 # the cache returns heads ordered lowest to highest
1180 1179 bheads.reverse()
1181 1180 if start is not None:
1182 1181 # filter out the heads that cannot be reached from startrev
1183 1182 bheads = self.changelog.nodesbetween([start], bheads)[2]
1184 1183 if not closed:
1185 1184 bheads = [h for h in bheads if
1186 1185 ('close' not in self.changelog.read(h)[5])]
1187 1186 return bheads
1188 1187
1189 1188 def branches(self, nodes):
1190 1189 if not nodes:
1191 1190 nodes = [self.changelog.tip()]
1192 1191 b = []
1193 1192 for n in nodes:
1194 1193 t = n
1195 1194 while 1:
1196 1195 p = self.changelog.parents(n)
1197 1196 if p[1] != nullid or p[0] == nullid:
1198 1197 b.append((t, n, p[0], p[1]))
1199 1198 break
1200 1199 n = p[0]
1201 1200 return b
1202 1201
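# Each entry above describes one linear segment of history:
#     (segment head, segment root, root's first parent, root's second parent)
# A segment ends at a merge or at a root; the discovery loop in
# findcommonincoming below consumes these 4-tuples.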
1203 1202 def between(self, pairs):
1204 1203 r = []
1205 1204
1206 1205 for top, bottom in pairs:
1207 1206 n, l, i = top, [], 0
1208 1207 f = 1
1209 1208
1210 1209 while n != bottom and n != nullid:
1211 1210 p = self.changelog.parents(n)[0]
1212 1211 if i == f:
1213 1212 l.append(n)
1214 1213 f = f * 2
1215 1214 n = p
1216 1215 i += 1
1217 1216
1218 1217 r.append(l)
1219 1218
1220 1219 return r
1221 1220
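# between() samples each top->bottom chain at exponentially growing
# distances: with f doubling, the nodes 1, 2, 4, 8, ... parent-steps
# below top are collected (an invented 10-step chain yields 4 samples).
# This is what lets the discovery code below narrow a long unknown
# branch in O(log n) wire round trips.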
1222 1221 def findincoming(self, remote, base=None, heads=None, force=False):
1223 1222 """Return list of roots of the subsets of missing nodes from remote
1224 1223
1225 1224 If base dict is specified, assume that these nodes and their parents
1226 1225 exist on the remote side and that no child of a node of base exists
1227 1226 in both remote and self.
1228 1227         Furthermore, base will be updated to include the nodes that exist
1229 1228         in both self and remote but have no children in both self and remote.
1230 1229 If a list of heads is specified, return only nodes which are heads
1231 1230 or ancestors of these heads.
1232 1231
1233 1232 All the ancestors of base are in self and in remote.
1234 1233 All the descendants of the list returned are missing in self.
1235 1234 (and so we know that the rest of the nodes are missing in remote, see
1236 1235 outgoing)
1237 1236 """
1238 1237 return self.findcommonincoming(remote, base, heads, force)[1]
1239 1238
1240 1239 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1241 1240 """Return a tuple (common, missing roots, heads) used to identify
1242 1241 missing nodes from remote.
1243 1242
1244 1243 If base dict is specified, assume that these nodes and their parents
1245 1244 exist on the remote side and that no child of a node of base exists
1246 1245 in both remote and self.
1247 1246         Furthermore, base will be updated to include the nodes that exist
1248 1247         in both self and remote but have no children in both self and remote.
1249 1248 If a list of heads is specified, return only nodes which are heads
1250 1249 or ancestors of these heads.
1251 1250
1252 1251 All the ancestors of base are in self and in remote.
1253 1252 """
1254 1253 m = self.changelog.nodemap
1255 1254 search = []
1256 1255 fetch = set()
1257 1256 seen = set()
1258 1257 seenbranch = set()
1259 1258 if base is None:
1260 1259 base = {}
1261 1260
1262 1261 if not heads:
1263 1262 heads = remote.heads()
1264 1263
1265 1264 if self.changelog.tip() == nullid:
1266 1265 base[nullid] = 1
1267 1266 if heads != [nullid]:
1268 1267 return [nullid], [nullid], list(heads)
1269 1268 return [nullid], [], []
1270 1269
1271 1270 # assume we're closer to the tip than the root
1272 1271 # and start by examining the heads
1273 1272 self.ui.status(_("searching for changes\n"))
1274 1273
1275 1274 unknown = []
1276 1275 for h in heads:
1277 1276 if h not in m:
1278 1277 unknown.append(h)
1279 1278 else:
1280 1279 base[h] = 1
1281 1280
1282 1281 heads = unknown
1283 1282 if not unknown:
1284 1283 return base.keys(), [], []
1285 1284
1286 1285 req = set(unknown)
1287 1286 reqcnt = 0
1288 1287
1289 1288 # search through remote branches
1290 1289 # a 'branch' here is a linear segment of history, with four parts:
1291 1290 # head, root, first parent, second parent
1292 1291 # (a branch always has two parents (or none) by definition)
1293 1292 unknown = remote.branches(unknown)
1294 1293 while unknown:
1295 1294 r = []
1296 1295 while unknown:
1297 1296 n = unknown.pop(0)
1298 1297 if n[0] in seen:
1299 1298 continue
1300 1299
1301 1300 self.ui.debug(_("examining %s:%s\n")
1302 1301 % (short(n[0]), short(n[1])))
1303 1302 if n[0] == nullid: # found the end of the branch
1304 1303 pass
1305 1304 elif n in seenbranch:
1306 1305 self.ui.debug(_("branch already found\n"))
1307 1306 continue
1308 1307 elif n[1] and n[1] in m: # do we know the base?
1309 1308 self.ui.debug(_("found incomplete branch %s:%s\n")
1310 1309 % (short(n[0]), short(n[1])))
1311 1310 search.append(n[0:2]) # schedule branch range for scanning
1312 1311 seenbranch.add(n)
1313 1312 else:
1314 1313 if n[1] not in seen and n[1] not in fetch:
1315 1314 if n[2] in m and n[3] in m:
1316 1315 self.ui.debug(_("found new changeset %s\n") %
1317 1316 short(n[1]))
1318 1317 fetch.add(n[1]) # earliest unknown
1319 1318 for p in n[2:4]:
1320 1319 if p in m:
1321 1320 base[p] = 1 # latest known
1322 1321
1323 1322 for p in n[2:4]:
1324 1323 if p not in req and p not in m:
1325 1324 r.append(p)
1326 1325 req.add(p)
1327 1326 seen.add(n[0])
1328 1327
1329 1328 if r:
1330 1329 reqcnt += 1
1331 1330 self.ui.debug(_("request %d: %s\n") %
1332 1331 (reqcnt, " ".join(map(short, r))))
1333 1332 for p in xrange(0, len(r), 10):
1334 1333 for b in remote.branches(r[p:p+10]):
1335 1334 self.ui.debug(_("received %s:%s\n") %
1336 1335 (short(b[0]), short(b[1])))
1337 1336 unknown.append(b)
1338 1337
1339 1338 # do binary search on the branches we found
1340 1339 while search:
1341 1340 newsearch = []
1342 1341 reqcnt += 1
1343 1342 for n, l in zip(search, remote.between(search)):
1344 1343 l.append(n[1])
1345 1344 p = n[0]
1346 1345 f = 1
1347 1346 for i in l:
1348 1347 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1349 1348 if i in m:
1350 1349 if f <= 2:
1351 1350 self.ui.debug(_("found new branch changeset %s\n") %
1352 1351 short(p))
1353 1352 fetch.add(p)
1354 1353 base[i] = 1
1355 1354 else:
1356 1355 self.ui.debug(_("narrowed branch search to %s:%s\n")
1357 1356 % (short(p), short(i)))
1358 1357 newsearch.append((p, i))
1359 1358 break
1360 1359 p, f = i, f * 2
1361 1360 search = newsearch
1362 1361
1363 1362 # sanity check our fetch list
1364 1363 for f in fetch:
1365 1364 if f in m:
1366 1365 raise error.RepoError(_("already have changeset ")
1367 1366 + short(f[:4]))
1368 1367
1369 1368 if base.keys() == [nullid]:
1370 1369 if force:
1371 1370 self.ui.warn(_("warning: repository is unrelated\n"))
1372 1371 else:
1373 1372 raise util.Abort(_("repository is unrelated"))
1374 1373
1375 1374 self.ui.debug(_("found new changesets starting at ") +
1376 1375 " ".join([short(f) for f in fetch]) + "\n")
1377 1376
1378 1377 self.ui.debug(_("%d total queries\n") % reqcnt)
1379 1378
1380 1379 return base.keys(), list(fetch), heads
1381 1380
1382 1381 def findoutgoing(self, remote, base=None, heads=None, force=False):
1383 1382 """Return list of nodes that are roots of subsets not in remote
1384 1383
1385 1384 If base dict is specified, assume that these nodes and their parents
1386 1385 exist on the remote side.
1387 1386 If a list of heads is specified, return only nodes which are heads
1388 1387 or ancestors of these heads, and return a second element which
1389 1388 contains all remote heads which get new children.
1390 1389 """
1391 1390 if base is None:
1392 1391 base = {}
1393 1392 self.findincoming(remote, base, heads, force=force)
1394 1393
1395 1394 self.ui.debug(_("common changesets up to ")
1396 1395 + " ".join(map(short, base.keys())) + "\n")
1397 1396
1398 1397 remain = set(self.changelog.nodemap)
1399 1398
1400 1399 # prune everything remote has from the tree
1401 1400 remain.remove(nullid)
1402 1401 remove = base.keys()
1403 1402 while remove:
1404 1403 n = remove.pop(0)
1405 1404 if n in remain:
1406 1405 remain.remove(n)
1407 1406 for p in self.changelog.parents(n):
1408 1407 remove.append(p)
1409 1408
1410 1409 # find every node whose parents have been pruned
1411 1410 subset = []
1412 1411 # find every remote head that will get new children
1413 1412 updated_heads = set()
1414 1413 for n in remain:
1415 1414 p1, p2 = self.changelog.parents(n)
1416 1415 if p1 not in remain and p2 not in remain:
1417 1416 subset.append(n)
1418 1417 if heads:
1419 1418 if p1 in heads:
1420 1419 updated_heads.add(p1)
1421 1420 if p2 in heads:
1422 1421 updated_heads.add(p2)
1423 1422
1424 1423 # this is the set of all roots we have to push
1425 1424 if heads:
1426 1425 return subset, list(updated_heads)
1427 1426 else:
1428 1427 return subset
1429 1428
1430 1429 def pull(self, remote, heads=None, force=False):
1431 1430 lock = self.lock()
1432 1431 try:
1433 1432 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1434 1433 force=force)
1435 1434 if fetch == [nullid]:
1436 1435 self.ui.status(_("requesting all changes\n"))
1437 1436
1438 1437 if not fetch:
1439 1438 self.ui.status(_("no changes found\n"))
1440 1439 return 0
1441 1440
1442 1441 if heads is None and remote.capable('changegroupsubset'):
1443 1442 heads = rheads
1444 1443
1445 1444 if heads is None:
1446 1445 cg = remote.changegroup(fetch, 'pull')
1447 1446 else:
1448 1447 if not remote.capable('changegroupsubset'):
1449 1448 raise util.Abort(_("Partial pull cannot be done because "
1450 1449 "other repository doesn't support "
1451 1450 "changegroupsubset."))
1452 1451 cg = remote.changegroupsubset(fetch, heads, 'pull')
1453 1452 return self.addchangegroup(cg, 'pull', remote.url())
1454 1453 finally:
1455 1454 lock.release()
1456 1455
1457 1456 def push(self, remote, force=False, revs=None):
1458 1457 # there are two ways to push to remote repo:
1459 1458 #
1460 1459 # addchangegroup assumes local user can lock remote
1461 1460 # repo (local filesystem, old ssh servers).
1462 1461 #
1463 1462 # unbundle assumes local user cannot lock remote repo (new ssh
1464 1463 # servers, http servers).
1465 1464
1466 1465 if remote.capable('unbundle'):
1467 1466 return self.push_unbundle(remote, force, revs)
1468 1467 return self.push_addchangegroup(remote, force, revs)
1469 1468
1470 1469 def prepush(self, remote, force, revs):
1471 1470 common = {}
1472 1471 remote_heads = remote.heads()
1473 1472 inc = self.findincoming(remote, common, remote_heads, force=force)
1474 1473
1475 1474 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1476 1475 if revs is not None:
1477 1476 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1478 1477 else:
1479 1478 bases, heads = update, self.changelog.heads()
1480 1479
1481 1480 def checkbranch(lheads, rheads, updatelh):
1482 1481 '''
1483 1482 check whether there are more local heads than remote heads on
1484 1483 a specific branch.
1485 1484
1486 1485 lheads: local branch heads
1487 1486 rheads: remote branch heads
1488 1487 updatelh: outgoing local branch heads
1489 1488 '''
1490 1489
1491 1490 warn = 0
1492 1491
1493 1492 if not revs and len(lheads) > len(rheads):
1494 1493 warn = 1
1495 1494 else:
1496 1495 updatelheads = [self.changelog.heads(x, lheads)
1497 1496 for x in updatelh]
1498 1497 newheads = set(sum(updatelheads, [])) & set(lheads)
1499 1498
1500 1499 if not newheads:
1501 1500 return True
1502 1501
1503 1502 for r in rheads:
1504 1503 if r in self.changelog.nodemap:
1505 1504 desc = self.changelog.heads(r, heads)
1506 1505 l = [h for h in heads if h in desc]
1507 1506 if not l:
1508 1507 newheads.add(r)
1509 1508 else:
1510 1509 newheads.add(r)
1511 1510 if len(newheads) > len(rheads):
1512 1511 warn = 1
1513 1512
1514 1513 if warn:
1515 1514 if not rheads: # new branch requires --force
1516 1515                     self.ui.warn(_("abort: push creates new"
1517 1516                                    " remote branch '%s'!\n") %
1518 1517                                  self[updatelh[0]].branch())
1519 1518 else:
1520 1519 self.ui.warn(_("abort: push creates new remote heads!\n"))
1521 1520
1522 1521 self.ui.status(_("(did you forget to merge?"
1523 1522 " use push -f to force)\n"))
1524 1523 return False
1525 1524 return True
1526 1525
1527 1526 if not bases:
1528 1527 self.ui.status(_("no changes found\n"))
1529 1528 return None, 1
1530 1529 elif not force:
1531 1530 # Check for each named branch if we're creating new remote heads.
1532 1531 # To be a remote head after push, node must be either:
1533 1532 # - unknown locally
1534 1533 # - a local outgoing head descended from update
1535 1534 # - a remote head that's known locally and not
1536 1535 # ancestral to an outgoing head
1537 1536 #
1538 1537 # New named branches cannot be created without --force.
1539 1538
1540 1539 if remote_heads != [nullid]:
1541 1540 if remote.capable('branchmap'):
1542 1541 localhds = {}
1543 1542 if not revs:
1544 1543 localhds = self.branchmap()
1545 1544 else:
1546 1545 for n in heads:
1547 1546 branch = self[n].branch()
1548 1547 if branch in localhds:
1549 1548 localhds[branch].append(n)
1550 1549 else:
1551 1550 localhds[branch] = [n]
1552 1551
1553 1552 remotehds = remote.branchmap()
1554 1553
1555 1554 for lh in localhds:
1556 1555 if lh in remotehds:
1557 1556 rheads = remotehds[lh]
1558 1557 else:
1559 1558 rheads = []
1560 1559 lheads = localhds[lh]
1561 1560 updatelh = [upd for upd in update
1562 1561 if self[upd].branch() == lh]
1563 1562 if not updatelh:
1564 1563 continue
1565 1564 if not checkbranch(lheads, rheads, updatelh):
1566 1565 return None, 0
1567 1566 else:
1568 1567 if not checkbranch(heads, remote_heads, update):
1569 1568 return None, 0
1570 1569
1571 1570 if inc:
1572 1571 self.ui.warn(_("note: unsynced remote changes!\n"))
1573 1572
1574 1573
1575 1574 if revs is None:
1576 1575 # use the fast path, no race possible on push
1577 1576 cg = self._changegroup(common.keys(), 'push')
1578 1577 else:
1579 1578 cg = self.changegroupsubset(update, revs, 'push')
1580 1579 return cg, remote_heads
1581 1580
1582 1581 def push_addchangegroup(self, remote, force, revs):
1583 1582 lock = remote.lock()
1584 1583 try:
1585 1584 ret = self.prepush(remote, force, revs)
1586 1585 if ret[0] is not None:
1587 1586 cg, remote_heads = ret
1588 1587 return remote.addchangegroup(cg, 'push', self.url())
1589 1588 return ret[1]
1590 1589 finally:
1591 1590 lock.release()
1592 1591
1593 1592 def push_unbundle(self, remote, force, revs):
1594 1593 # local repo finds heads on server, finds out what revs it
1595 1594 # must push. once revs transferred, if server finds it has
1596 1595 # different heads (someone else won commit/push race), server
1597 1596 # aborts.
1598 1597
1599 1598 ret = self.prepush(remote, force, revs)
1600 1599 if ret[0] is not None:
1601 1600 cg, remote_heads = ret
1602 1601 if force: remote_heads = ['force']
1603 1602 return remote.unbundle(cg, remote_heads, 'push')
1604 1603 return ret[1]
1605 1604
1606 1605 def changegroupinfo(self, nodes, source):
1607 1606 if self.ui.verbose or source == 'bundle':
1608 1607 self.ui.status(_("%d changesets found\n") % len(nodes))
1609 1608 if self.ui.debugflag:
1610 1609 self.ui.debug(_("list of changesets:\n"))
1611 1610 for node in nodes:
1612 1611 self.ui.debug("%s\n" % hex(node))
1613 1612
1614 1613 def changegroupsubset(self, bases, heads, source, extranodes=None):
1615 1614 """This function generates a changegroup consisting of all the nodes
1616 1615         that are descendants of any of the bases, and ancestors of any of
1617 1616 the heads.
1618 1617
1619 1618 It is fairly complex as determining which filenodes and which
1620 1619 manifest nodes need to be included for the changeset to be complete
1621 1620 is non-trivial.
1622 1621
1623 1622 Another wrinkle is doing the reverse, figuring out which changeset in
1624 1623 the changegroup a particular filenode or manifestnode belongs to.
1625 1624
1626 1625 The caller can specify some nodes that must be included in the
1627 1626 changegroup using the extranodes argument. It should be a dict
1628 1627 where the keys are the filenames (or 1 for the manifest), and the
1629 1628 values are lists of (node, linknode) tuples, where node is a wanted
1630 1629 node and linknode is the changelog node that should be transmitted as
1631 1630 the linkrev.
1632 1631 """
1633 1632
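# Shape of the extranodes argument described above (invented values):
#
#     extranodes = {'foo.c': [(filenode, linknode)],
#                   1: [(manifestnode, linknode)]}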
1634 1633 if extranodes is None:
1635 1634         # can we go through the fast path?
1636 1635 heads.sort()
1637 1636 allheads = self.heads()
1638 1637 allheads.sort()
1639 1638 if heads == allheads:
1640 1639 common = []
1641 1640 # parents of bases are known from both sides
1642 1641 for n in bases:
1643 1642 for p in self.changelog.parents(n):
1644 1643 if p != nullid:
1645 1644 common.append(p)
1646 1645 return self._changegroup(common, source)
1647 1646
1648 1647 self.hook('preoutgoing', throw=True, source=source)
1649 1648
1650 1649 # Set up some initial variables
1651 1650 # Make it easy to refer to self.changelog
1652 1651 cl = self.changelog
1653 1652 # msng is short for missing - compute the list of changesets in this
1654 1653 # changegroup.
1655 1654 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1656 1655 self.changegroupinfo(msng_cl_lst, source)
1657 1656 # Some bases may turn out to be superfluous, and some heads may be
1658 1657 # too. nodesbetween will return the minimal set of bases and heads
1659 1658 # necessary to re-create the changegroup.
1660 1659
1661 1660 # Known heads are the list of heads that it is assumed the recipient
1662 1661 # of this changegroup will know about.
1663 1662 knownheads = set()
1664 1663 # We assume that all parents of bases are known heads.
1665 1664 for n in bases:
1666 1665 knownheads.update(cl.parents(n))
1667 1666 knownheads.discard(nullid)
1668 1667 knownheads = list(knownheads)
1669 1668 if knownheads:
1670 1669 # Now that we know what heads are known, we can compute which
1671 1670 # changesets are known. The recipient must know about all
1672 1671 # changesets required to reach the known heads from the null
1673 1672 # changeset.
1674 1673 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1675 1674 junk = None
1676 1675 # Transform the list into a set.
1677 1676 has_cl_set = set(has_cl_set)
1678 1677 else:
1679 1678 # If there were no known heads, the recipient cannot be assumed to
1680 1679 # know about any changesets.
1681 1680 has_cl_set = set()
1682 1681
1683 1682 # Make it easy to refer to self.manifest
1684 1683 mnfst = self.manifest
1685 1684 # We don't know which manifests are missing yet
1686 1685 msng_mnfst_set = {}
1687 1686 # Nor do we know which filenodes are missing.
1688 1687 msng_filenode_set = {}
1689 1688
1690 1689 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1691 1690 junk = None
1692 1691
1693 1692 # A changeset always belongs to itself, so the changenode lookup
1694 1693 # function for a changenode is identity.
1695 1694 def identity(x):
1696 1695 return x
1697 1696
1698 1697 # A function generating function. Sets up an environment for the
1699 1698 # inner function.
1700 1699 def cmp_by_rev_func(revlog):
1701 1700 # Compare two nodes by their revision number in the environment's
1702 1701 # revision history. Since the revision number both represents the
1703 1702 # most efficient order to read the nodes in, and represents a
1704 1703 # topological sorting of the nodes, this function is often useful.
1705 1704 def cmp_by_rev(a, b):
1706 1705 return cmp(revlog.rev(a), revlog.rev(b))
1707 1706 return cmp_by_rev
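# Typical use (as in prune_parents below), sorting a node list into
# the revlog's storage order:  nodelist.sort(cmp_by_rev_func(revlog))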
1708 1707
1709 1708 # If we determine that a particular file or manifest node must be a
1710 1709 # node that the recipient of the changegroup will already have, we can
1711 1710 # also assume the recipient will have all the parents. This function
1712 1711 # prunes them from the set of missing nodes.
1713 1712 def prune_parents(revlog, hasset, msngset):
1714 1713 haslst = list(hasset)
1715 1714 haslst.sort(cmp_by_rev_func(revlog))
1716 1715 for node in haslst:
1717 1716 parentlst = [p for p in revlog.parents(node) if p != nullid]
1718 1717 while parentlst:
1719 1718 n = parentlst.pop()
1720 1719 if n not in hasset:
1721 1720 hasset.add(n)
1722 1721 p = [p for p in revlog.parents(n) if p != nullid]
1723 1722 parentlst.extend(p)
1724 1723 for n in hasset:
1725 1724 msngset.pop(n, None)
1726 1725
1727 1726 # This is a function generating function used to set up an environment
1728 1727 # for the inner function to execute in.
1729 1728 def manifest_and_file_collector(changedfileset):
1730 1729 # This is an information gathering function that gathers
1731 1730 # information from each changeset node that goes out as part of
1732 1731 # the changegroup. The information gathered is a list of which
1733 1732 # manifest nodes are potentially required (the recipient may
1734 1733 # already have them) and the total list of all files that were
1735 1734 # changed in any changeset in the changegroup.
1736 1735 #
1737 1736 # We also remember the first changenode each manifest was
1738 1737 # referenced by, so we can later determine which changenode 'owns'
1739 1738 # the manifest.
1740 1739 def collect_manifests_and_files(clnode):
1741 1740 c = cl.read(clnode)
1742 1741 for f in c[3]:
1743 1742 # This is to make sure we only have one instance of each
1744 1743 # filename string for each filename.
1745 1744 changedfileset.setdefault(f, f)
1746 1745 msng_mnfst_set.setdefault(c[0], clnode)
1747 1746 return collect_manifests_and_files
1748 1747
1749 1748 # Figure out which manifest nodes (of the ones we think might be part
1750 1749 # of the changegroup) the recipient must know about and remove them
1751 1750 # from the changegroup.
1752 1751 def prune_manifests():
1753 1752 has_mnfst_set = set()
1754 1753 for n in msng_mnfst_set:
1755 1754 # If a 'missing' manifest thinks it belongs to a changenode
1756 1755 # the recipient is assumed to have, obviously the recipient
1757 1756 # must have that manifest.
1758 1757 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1759 1758 if linknode in has_cl_set:
1760 1759 has_mnfst_set.add(n)
1761 1760 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1762 1761
1763 1762 # Use the information collected in collect_manifests_and_files to say
1764 1763 # which changenode any manifestnode belongs to.
1765 1764 def lookup_manifest_link(mnfstnode):
1766 1765 return msng_mnfst_set[mnfstnode]
1767 1766
1768 1767 # A function generating function that sets up the initial environment
1769 1768 # for the inner function.
1770 1769 def filenode_collector(changedfiles):
1771 1770 next_rev = [0]
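# next_rev is a one-element list so that collect_msng_filenodes can
# rebind its contents; Python 2 closures cannot rebind an outer
# local variable directly.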
1772 1771 # This gathers information from each manifestnode included in the
1773 1772 # changegroup about which filenodes the manifest node references
1774 1773 # so we can include those in the changegroup too.
1775 1774 #
1776 1775 # It also remembers which changenode each filenode belongs to. It
1777 1776 # does this by assuming that a filenode belongs to the changenode
1778 1777 # that the first manifest referencing it belongs to.
1779 1778 def collect_msng_filenodes(mnfstnode):
1780 1779 r = mnfst.rev(mnfstnode)
1781 1780 if r == next_rev[0]:
1782 1781 # If this rev immediately follows the last one we read,
1783 1782 # we only need to look at the delta.
1784 1783 deltamf = mnfst.readdelta(mnfstnode)
1785 1784 # For each line in the delta
1786 1785 for f, fnode in deltamf.iteritems():
1787 1786 f = changedfiles.get(f, None)
1788 1787 # And if the file is in the list of files we care
1789 1788 # about.
1790 1789 if f is not None:
1791 1790 # Get the changenode this manifest belongs to
1792 1791 clnode = msng_mnfst_set[mnfstnode]
1793 1792 # Create the set of filenodes for the file if
1794 1793 # there isn't one already.
1795 1794 ndset = msng_filenode_set.setdefault(f, {})
1796 1795 # And set the filenode's changelog node to the
1797 1796 # manifest's if it hasn't been set already.
1798 1797 ndset.setdefault(fnode, clnode)
1799 1798 else:
1800 1799 # Otherwise we need a full manifest.
1801 1800 m = mnfst.read(mnfstnode)
1802 1801 # For every file we care about.
1803 1802 for f in changedfiles:
1804 1803 fnode = m.get(f, None)
1805 1804 # If it's in the manifest
1806 1805 if fnode is not None:
1807 1806 # See comments above.
1808 1807 clnode = msng_mnfst_set[mnfstnode]
1809 1808 ndset = msng_filenode_set.setdefault(f, {})
1810 1809 ndset.setdefault(fnode, clnode)
1811 1810 # Remember the revision we hope to see next.
1812 1811 next_rev[0] = r + 1
1813 1812 return collect_msng_filenodes
1814 1813
1815 1814 # We have a list of filenodes we think we need for a file; let's remove
1816 1815 # all those we know the recipient must have.
1817 1816 def prune_filenodes(f, filerevlog):
1818 1817 msngset = msng_filenode_set[f]
1819 1818 hasset = set()
1820 1819 # If a 'missing' filenode thinks it belongs to a changenode we
1821 1820 # assume the recipient must have, then the recipient must have
1822 1821 # that filenode.
1823 1822 for n in msngset:
1824 1823 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1825 1824 if clnode in has_cl_set:
1826 1825 hasset.add(n)
1827 1826 prune_parents(filerevlog, hasset, msngset)
1828 1827
1829 1828 # A function generating function that sets up a context for the
1830 1829 # inner function.
1831 1830 def lookup_filenode_link_func(fname):
1832 1831 msngset = msng_filenode_set[fname]
1833 1832 # Lookup the changenode the filenode belongs to.
1834 1833 def lookup_filenode_link(fnode):
1835 1834 return msngset[fnode]
1836 1835 return lookup_filenode_link
1837 1836
1838 1837 # Add the nodes that were explicitly requested.
1839 1838 def add_extra_nodes(name, nodes):
1840 1839 if not extranodes or name not in extranodes:
1841 1840 return
1842 1841
1843 1842 for node, linknode in extranodes[name]:
1844 1843 if node not in nodes:
1845 1844 nodes[node] = linknode
1846 1845
1847 1846 # Now that we have all these utility functions to help out and
1848 1847 # logically divide up the task, generate the group.
1849 1848 def gengroup():
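# The stream produced below is: the changelog group, then the
# manifest group, then one (filename, filenode group) pair per
# changed file, and finally an empty chunk marking the end.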
1850 1849 # The set of changed files starts empty.
1851 1850 changedfiles = {}
1852 1851 # Create a changenode group generator that will call our functions
1853 1852 # back to lookup the owning changenode and collect information.
1854 1853 group = cl.group(msng_cl_lst, identity,
1855 1854 manifest_and_file_collector(changedfiles))
1856 1855 for chnk in group:
1857 1856 yield chnk
1858 1857
1859 1858 # The list of manifests has been collected by the generator
1860 1859 # calling our functions back.
1861 1860 prune_manifests()
1862 1861 add_extra_nodes(1, msng_mnfst_set)
1863 1862 msng_mnfst_lst = msng_mnfst_set.keys()
1864 1863 # Sort the manifestnodes by revision number.
1865 1864 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1866 1865 # Create a generator for the manifestnodes that calls our lookup
1867 1866 # and data collection functions back.
1868 1867 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1869 1868 filenode_collector(changedfiles))
1870 1869 for chnk in group:
1871 1870 yield chnk
1872 1871
1873 1872 # These are no longer needed, dereference and toss the memory for
1874 1873 # them.
1875 1874 msng_mnfst_lst = None
1876 1875 msng_mnfst_set.clear()
1877 1876
1878 1877 if extranodes:
1879 1878 for fname in extranodes:
1880 1879 if isinstance(fname, int):
1881 1880 continue
1882 1881 msng_filenode_set.setdefault(fname, {})
1883 1882 changedfiles[fname] = 1
1884 1883 # Go through all our files in order sorted by name.
1885 1884 for fname in sorted(changedfiles):
1886 1885 filerevlog = self.file(fname)
1887 1886 if not len(filerevlog):
1888 1887 raise util.Abort(_("empty or missing revlog for %s") % fname)
1889 1888 # Toss out the filenodes that the recipient isn't really
1890 1889 # missing.
1891 1890 if fname in msng_filenode_set:
1892 1891 prune_filenodes(fname, filerevlog)
1893 1892 add_extra_nodes(fname, msng_filenode_set[fname])
1894 1893 msng_filenode_lst = msng_filenode_set[fname].keys()
1895 1894 else:
1896 1895 msng_filenode_lst = []
1897 1896 # If any filenodes are left, generate the group for them,
1898 1897 # otherwise don't bother.
1899 1898 if len(msng_filenode_lst) > 0:
1900 1899 yield changegroup.chunkheader(len(fname))
1901 1900 yield fname
1902 1901 # Sort the filenodes by their revision number.
1903 1902 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1904 1903 # Create a group generator and only pass in a changenode
1905 1904 # lookup function as we need to collect no information
1906 1905 # from filenodes.
1907 1906 group = filerevlog.group(msng_filenode_lst,
1908 1907 lookup_filenode_link_func(fname))
1909 1908 for chnk in group:
1910 1909 yield chnk
1911 1910 if fname in msng_filenode_set:
1912 1911 # Don't need this anymore, toss it to free memory.
1913 1912 del msng_filenode_set[fname]
1914 1913 # Signal that no more groups are left.
1915 1914 yield changegroup.closechunk()
1916 1915
1917 1916 if msng_cl_lst:
1918 1917 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1919 1918
1920 1919 return util.chunkbuffer(gengroup())
1921 1920
1922 1921 def changegroup(self, basenodes, source):
1923 1922 # to avoid a race we use changegroupsubset() (issue1320)
1924 1923 return self.changegroupsubset(basenodes, self.heads(), source)
1925 1924
1926 1925 def _changegroup(self, common, source):
1927 1926 """Generate a changegroup of all nodes that we have that a recipient
1928 1927 doesn't.
1929 1928
1930 1929 This is much easier than the previous function as we can assume that
1931 1930 the recipient has any changenode we aren't sending them.
1932 1931
1933 1932 common is the set of common nodes between remote and self"""
1934 1933
1935 1934 self.hook('preoutgoing', throw=True, source=source)
1936 1935
1937 1936 cl = self.changelog
1938 1937 nodes = cl.findmissing(common)
1939 1938 revset = set([cl.rev(n) for n in nodes])
1940 1939 self.changegroupinfo(nodes, source)
1941 1940
1942 1941 def identity(x):
1943 1942 return x
1944 1943
1945 1944 def gennodelst(log):
1946 1945 for r in log:
1947 1946 if log.linkrev(r) in revset:
1948 1947 yield log.node(r)
1949 1948
1950 1949 def changed_file_collector(changedfileset):
1951 1950 def collect_changed_files(clnode):
1952 1951 c = cl.read(clnode)
1953 1952 changedfileset.update(c[3])
1954 1953 return collect_changed_files
1955 1954
1956 1955 def lookuprevlink_func(revlog):
1957 1956 def lookuprevlink(n):
1958 1957 return cl.node(revlog.linkrev(revlog.rev(n)))
1959 1958 return lookuprevlink
1960 1959
1961 1960 def gengroup():
1962 1961 # construct a list of all changed files
1963 1962 changedfiles = set()
1964 1963
1965 1964 for chnk in cl.group(nodes, identity,
1966 1965 changed_file_collector(changedfiles)):
1967 1966 yield chnk
1968 1967
1969 1968 mnfst = self.manifest
1970 1969 nodeiter = gennodelst(mnfst)
1971 1970 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1972 1971 yield chnk
1973 1972
1974 1973 for fname in sorted(changedfiles):
1975 1974 filerevlog = self.file(fname)
1976 1975 if not len(filerevlog):
1977 1976 raise util.Abort(_("empty or missing revlog for %s") % fname)
1978 1977 nodeiter = gennodelst(filerevlog)
1979 1978 nodeiter = list(nodeiter)
1980 1979 if nodeiter:
1981 1980 yield changegroup.chunkheader(len(fname))
1982 1981 yield fname
1983 1982 lookup = lookuprevlink_func(filerevlog)
1984 1983 for chnk in filerevlog.group(nodeiter, lookup):
1985 1984 yield chnk
1986 1985
1987 1986 yield changegroup.closechunk()
1988 1987
1989 1988 if nodes:
1990 1989 self.hook('outgoing', node=hex(nodes[0]), source=source)
1991 1990
1992 1991 return util.chunkbuffer(gengroup())
1993 1992
1994 1993 def addchangegroup(self, source, srctype, url, emptyok=False):
1995 1994 """add changegroup to repo.
1996 1995
1997 1996 return values:
1998 1997 - nothing changed or no source: 0
1999 1998 - more heads than before: 1+added heads (2..n)
2000 1999 - fewer heads than before: -1-removed heads (-2..-n)
2001 2000 - number of heads stays the same: 1
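
Worked examples (illustrative): going from 2 heads to 3 returns
3 - 2 + 1 = 2; going from 2 heads to 1 returns 1 - 2 - 1 = -2;
staying at 2 heads returns 1.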
2002 2001 """
2003 2002 def csmap(x):
2004 2003 self.ui.debug(_("add changeset %s\n") % short(x))
2005 2004 return len(cl)
2006 2005
2007 2006 def revmap(x):
2008 2007 return cl.rev(x)
2009 2008
2010 2009 if not source:
2011 2010 return 0
2012 2011
2013 2012 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2014 2013
2015 2014 changesets = files = revisions = 0
2016 2015
2017 2016 # write changelog data to temp files so concurrent readers will not see
2018 2017 # an inconsistent view
2019 2018 cl = self.changelog
2020 2019 cl.delayupdate()
2021 2020 oldheads = len(cl.heads())
2022 2021
2023 2022 tr = self.transaction()
2024 2023 try:
2025 2024 trp = weakref.proxy(tr)
2026 2025 # pull off the changeset group
2027 2026 self.ui.status(_("adding changesets\n"))
2028 2027 clstart = len(cl)
2029 2028 chunkiter = changegroup.chunkiter(source)
2030 2029 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2031 2030 raise util.Abort(_("received changelog group is empty"))
2032 2031 clend = len(cl)
2033 2032 changesets = clend - clstart
2034 2033
2035 2034 # pull off the manifest group
2036 2035 self.ui.status(_("adding manifests\n"))
2037 2036 chunkiter = changegroup.chunkiter(source)
2038 2037 # no need to check for empty manifest group here:
2039 2038 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2040 2039 # no new manifest will be created and the manifest group will
2041 2040 # be empty during the pull
2042 2041 self.manifest.addgroup(chunkiter, revmap, trp)
2043 2042
2044 2043 # process the files
2045 2044 self.ui.status(_("adding file changes\n"))
2046 2045 while 1:
2047 2046 f = changegroup.getchunk(source)
2048 2047 if not f:
2049 2048 break
2050 2049 self.ui.debug(_("adding %s revisions\n") % f)
2051 2050 fl = self.file(f)
2052 2051 o = len(fl)
2053 2052 chunkiter = changegroup.chunkiter(source)
2054 2053 if fl.addgroup(chunkiter, revmap, trp) is None:
2055 2054 raise util.Abort(_("received file revlog group is empty"))
2056 2055 revisions += len(fl) - o
2057 2056 files += 1
2058 2057
2059 2058 newheads = len(cl.heads())
2060 2059 heads = ""
2061 2060 if oldheads and newheads != oldheads:
2062 2061 heads = _(" (%+d heads)") % (newheads - oldheads)
2063 2062
2064 2063 self.ui.status(_("added %d changesets"
2065 2064 " with %d changes to %d files%s\n")
2066 2065 % (changesets, revisions, files, heads))
2067 2066
2068 2067 if changesets > 0:
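# The 'pending' callback hands hooks the repo root only when
# cl.writepending() reports it flushed pending changelog data, so
# pretxnchangegroup hooks can inspect the incoming changesets.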
2069 2068 p = lambda: cl.writepending() and self.root or ""
2070 2069 self.hook('pretxnchangegroup', throw=True,
2071 2070 node=hex(cl.node(clstart)), source=srctype,
2072 2071 url=url, pending=p)
2073 2072
2074 2073 # make changelog see real files again
2075 2074 cl.finalize(trp)
2076 2075
2077 2076 tr.close()
2078 2077 finally:
2079 2078 del tr
2080 2079
2081 2080 if changesets > 0:
2082 2081 # forcefully update the on-disk branch cache
2083 2082 self.ui.debug(_("updating the branch cache\n"))
2084 2083 self.branchtags()
2085 2084 self.hook("changegroup", node=hex(cl.node(clstart)),
2086 2085 source=srctype, url=url)
2087 2086
2088 2087 for i in xrange(clstart, clend):
2089 2088 self.hook("incoming", node=hex(cl.node(i)),
2090 2089 source=srctype, url=url)
2091 2090
2092 2091 # never return 0 here:
2093 2092 if newheads < oldheads:
2094 2093 return newheads - oldheads - 1
2095 2094 else:
2096 2095 return newheads - oldheads + 1
2097 2096
2098 2097
2099 2098 def stream_in(self, remote):
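# Wire format, as parsed below: one status-code line, then a line
# "<total_files> <total_bytes>", then for each file a header line
# "<name>\0<size>" followed by exactly <size> bytes of file data.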
2100 2099 fp = remote.stream_out()
2101 2100 l = fp.readline()
2102 2101 try:
2103 2102 resp = int(l)
2104 2103 except ValueError:
2105 2104 raise error.ResponseError(
2106 2105 _('Unexpected response from remote server:'), l)
2107 2106 if resp == 1:
2108 2107 raise util.Abort(_('operation forbidden by server'))
2109 2108 elif resp == 2:
2110 2109 raise util.Abort(_('locking the remote repository failed'))
2111 2110 elif resp != 0:
2112 2111 raise util.Abort(_('the server sent an unknown error code'))
2113 2112 self.ui.status(_('streaming all changes\n'))
2114 2113 l = fp.readline()
2115 2114 try:
2116 2115 total_files, total_bytes = map(int, l.split(' ', 1))
2117 2116 except (ValueError, TypeError):
2118 2117 raise error.ResponseError(
2119 2118 _('Unexpected response from remote server:'), l)
2120 2119 self.ui.status(_('%d files to transfer, %s of data\n') %
2121 2120 (total_files, util.bytecount(total_bytes)))
2122 2121 start = time.time()
2123 2122 for i in xrange(total_files):
2124 2123 # XXX doesn't support '\n' or '\r' in filenames
2125 2124 l = fp.readline()
2126 2125 try:
2127 2126 name, size = l.split('\0', 1)
2128 2127 size = int(size)
2129 2128 except (ValueError, TypeError):
2130 2129 raise error.ResponseError(
2131 2130 _('Unexpected response from remote server:'), l)
2132 2131 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2133 2132 # for backwards compat, name was partially encoded
2134 2133 ofp = self.sopener(store.decodedir(name), 'w')
2135 2134 for chunk in util.filechunkiter(fp, limit=size):
2136 2135 ofp.write(chunk)
2137 2136 ofp.close()
2138 2137 elapsed = time.time() - start
2139 2138 if elapsed <= 0:
2140 2139 elapsed = 0.001
2141 2140 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2142 2141 (util.bytecount(total_bytes), elapsed,
2143 2142 util.bytecount(total_bytes / elapsed)))
2144 2143 self.invalidate()
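# mirror addchangegroup's convention of never returning 0 on success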
2145 2144 return len(self.heads()) + 1
2146 2145
2147 2146 def clone(self, remote, heads=[], stream=False):
2148 2147 '''clone remote repository.
2149 2148
2150 2149 keyword arguments:
2151 2150 heads: list of revs to clone (forces use of pull)
2152 2151 stream: use streaming clone if possible'''
2153 2152
2154 2153 # now, all clients that can request uncompressed clones can
2155 2154 # read repo formats supported by all servers that can serve
2156 2155 # them.
2157 2156
2158 2157 # if revlog format changes, client will have to check version
2159 2158 # and format flags on "stream" capability, and use
2160 2159 # uncompressed only if compatible.
2161 2160
2162 2161 if stream and not heads and remote.capable('stream'):
2163 2162 return self.stream_in(remote)
2164 2163 return self.pull(remote, heads)
2165 2164
2166 2165 # used to avoid circular references so destructors work
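# (illustrative use: a transaction given aftertrans([(journal, undo)])
# as its on-close callback renames the journal without the closure
# keeping a reference back to the repo)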
2167 2166 def aftertrans(files):
2168 2167 renamefiles = [tuple(t) for t in files]
2169 2168 def a():
2170 2169 for src, dest in renamefiles:
2171 2170 util.rename(src, dest)
2172 2171 return a
2173 2172
2174 2173 def instance(ui, path, create):
2175 2174 return localrepository(ui, util.drop_scheme('file', path), create)
2176 2175
2177 2176 def islocal(path):
2178 2177 return True