##// END OF EJS Templates
bundle: don't send too many changesets (Issue1704)...
Peter Arrenbrecht -
r9820:0b999aec default
parent child Browse files
Show More
@@ -1,2160 +1,2158
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 import tags as tags_
17 17 from lock import release
18 18 import weakref, stat, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
class localrepository(repo.repository):
    """A repository accessed directly through the local filesystem."""
    # wire-protocol capabilities this repository class can serve
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    # on-disk format requirements this version knows how to read
    supported = set('revlogv1 store fncache shared'.split())
    def __init__(self, baseui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at *path*.

        baseui is copied so per-repository configuration (.hg/hgrc)
        does not leak into the caller's ui.  Raises error.RepoError if
        the repository is missing, already exists when creating, or
        declares an on-disk requirement this version does not support.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # read per-repo config and load any extensions it enables;
            # a missing hgrc is not an error
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    # fncache only makes sense on top of the store layout
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                # a missing requires file means an old-style repo;
                # anything else is a real error
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            # shared repositories record the real store location in
            # .hg/sharedpath; absence of the file means "not shared"
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
    @propertycache
    def changelog(self):
        """The changelog revlog, constructed lazily on first access."""
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                # NOTE(review): HG_PENDING appears to be set while a
                # transaction is open so pending (uncommitted) revisions
                # become visible, e.g. to pretxn hooks — confirm
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        """The manifest revlog, constructed lazily on first access."""
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        """The working directory state, constructed lazily on first access."""
        return dirstate.dirstate(self.opener, self.ui, self.root)
    def __getitem__(self, changeid):
        """repo[changeid] -> changectx; repo[None] is the working directory."""
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        """Number of changesets in the repository."""
        return len(self.changelog)

    def __iter__(self):
        """Iterate over all revision numbers, in ascending order."""
        for i in xrange(len(self)):
            yield i
    def url(self):
        """Return the URL of this repository (always a file: URL)."""
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Run the configured hooks named *name*; if throw, abort on failure."""
        return hook.hook(self.ui, self, name, throw, **args)

    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level tagging helper shared by tag().

        names may be a single string or a sequence of tag names; node
        is the node being tagged.  If local, the tags are written to
        .hg/localtags; otherwise they are appended to .hgtags and a
        changeset is committed.  Returns the new changeset node, or
        None for local tags.  Note: the default extra dict is shared
        across calls but never mutated here.
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            # append tag lines at EOF, ensuring the existing content
            # ends with a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    # the tag already exists: record its old node first
                    # so the tag's history is preserved in the file
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            # open for in-place append, reading existing content when
            # the file already exists
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        # commit only .hgtags
        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to proceed if .hgtags shows up in any of the first
        # five status lists: the commit performed by _tag() would
        # silently pick up those working-copy changes
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
240 240 def tags(self):
241 241 '''return a mapping of tag to node'''
242 242 if self._tags is None:
243 243 (self._tags, self._tagtypes) = self._findtags()
244 244
245 245 return self._tags
246 246
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        # collect global (.hgtags) tags first, then local ones
        tags_.findglobaltags(self.ui, self, alltags, tagtypes)
        tags_.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # a tag pointing at nullid has been deleted
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        '''

        # make sure the _tagtypes cache is populated
        self.tags()

        return self._tagtypes.get(tagname)
293 293 def tagslist(self):
294 294 '''return a list of tags ordered by revision'''
295 295 l = []
296 296 for t, n in self.tags().iteritems():
297 297 try:
298 298 r = self.changelog.rev(n)
299 299 except:
300 300 r = -2 # sort to the beginning of the list if unknown
301 301 l.append((r, t, n))
302 302 return [(t, n) for r, t, n in sorted(l)]
303 303
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            # build the reverse mapping node -> [tag, ...] once; tags()
            # always contains 'tip', so the cache is never an empty dict
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])
    def _branchtags(self, partial, lrev):
        """Bring the branch-head dict *partial* (valid up to rev lrev)
        up to the current tip, persisting it to disk, and return it."""
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
    def branchmap(self):
        """Return a dict mapping branch name to list of heads, keeping
        the in-memory branch cache up to date."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache is already current
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory cache (first call, or the old tip was
            # stripped): start over from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incrementally extend the in-memory cache from the old tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

        return self._branchcache
340 340 def branchtags(self):
341 341 '''return a dict where branch names map to the tipmost head of
342 342 the branch, open heads come before closed'''
343 343 bt = {}
344 344 for bn, heads in self.branchmap().iteritems():
345 345 head = None
346 346 for i in range(len(heads)-1, -1, -1):
347 347 h = heads[i]
348 348 if 'close' not in self.changelog.read(h)[5]:
349 349 head = h
350 350 break
351 351 # no open heads were found
352 352 if head is None:
353 353 head = heads[-1]
354 354 bt[bn] = head
355 355 return bt
356 356
357 357
    def _readbranchcache(self):
        """Read .hg/branchheads.cache from disk.

        Returns (partial, last, lrev): partial maps branch name to a
        list of head nodes; last/lrev are the tip node/rev the cache
        was valid for.  Any problem yields an empty cache, which is
        simply rebuilt later.
        """
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line is "<tiphex> <tiprev>"; the rest are
            # "<nodehex> <branchname>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal -- fall back to rebuilding
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
    def _writebranchcache(self, branches, tip, tiprev):
        """Persist the branch-head cache atomically; write failures are
        ignored since the cache can always be rebuilt."""
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
    def _updatebranchcache(self, partial, start, end):
        """Fold revisions [start, end) into the branch-head dict *partial*,
        mutating it in place."""
        # collect new branch entries
        newbranches = {}
        for r in xrange(start, end):
            c = self[r]
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                # only walk back as far as the oldest current head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                # heads reachable from latest are no longer heads
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads
423 423 def lookup(self, key):
424 424 if isinstance(key, int):
425 425 return self.changelog.node(key)
426 426 elif key == '.':
427 427 return self.dirstate.parents()[0]
428 428 elif key == 'null':
429 429 return nullid
430 430 elif key == 'tip':
431 431 return self.changelog.tip()
432 432 n = self.changelog._match(key)
433 433 if n:
434 434 return n
435 435 if key in self.tags():
436 436 return self.tags()[key]
437 437 if key in self.branchtags():
438 438 return self.branchtags()[key]
439 439 n = self.changelog._partialmatch(key)
440 440 if n:
441 441 return n
442 442
443 443 # can't find key, check if it might have come from damaged dirstate
444 444 if key in self.dirstate.parents():
445 445 raise error.Abort(_("working directory has unknown parent '%s'!")
446 446 % short(key))
447 447 try:
448 448 if len(key) == 20:
449 449 key = hex(key)
450 450 except:
451 451 pass
452 452 raise error.RepoLookupError(_("unknown revision '%s'") % key)
453 453
    def local(self):
        """True: this is a local (direct filesystem) repository."""
        return True

    def join(self, f):
        """Join f with the .hg directory path."""
        return os.path.join(self.path, f)

    def wjoin(self, f):
        """Join f with the working directory root."""
        return os.path.join(self.root, f)

    def rjoin(self, f):
        """Join a repo-relative ('/'-separated) path with the root."""
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        """Return the changectx for changeid (same as self[changeid])."""
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # current directory, relative to the repository root
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # printable path for f, relative to cwd
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        """Open file f from the working directory."""
        return self.wopener(f, mode)

    def _link(self, f):
        """True if working directory file f is a symlink."""
        return os.path.islink(self.wjoin(f))
495 495 def _filter(self, filter, filename, data):
496 496 if filter not in self.filterpats:
497 497 l = []
498 498 for pat, cmd in self.ui.configitems(filter):
499 499 if cmd == '!':
500 500 continue
501 501 mf = match_.match(self.root, '', [pat])
502 502 fn = None
503 503 params = cmd
504 504 for name, filterfn in self._datafilters.iteritems():
505 505 if cmd.startswith(name):
506 506 fn = filterfn
507 507 params = cmd[len(name):].lstrip()
508 508 break
509 509 if not fn:
510 510 fn = lambda s, c, **kwargs: util.filter(s, c)
511 511 # Wrap old filters not supporting keyword arguments
512 512 if not inspect.getargspec(fn)[2]:
513 513 oldfn = fn
514 514 fn = lambda s, c, **kwargs: oldfn(s, c)
515 515 l.append((mf, fn, params))
516 516 self.filterpats[filter] = l
517 517
518 518 for mf, fn, cmd in self.filterpats[filter]:
519 519 if mf(filename):
520 520 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
521 521 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
522 522 break
523 523
524 524 return data
525 525
    def adddatafilter(self, name, filter):
        """Register a named data filter usable in encode/decode config."""
        self._datafilters[name] = filter

    def wread(self, filename):
        """Read filename from the working directory (the link target for
        symlinks) and run it through the 'encode' filters."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying the
        'decode' filters and the 'l' (symlink) / 'x' (exec) flags."""
        data = self._filter("decode", filename, data)
        try:
            # remove any existing file or link first, so the file type
            # can change (e.g. regular file -> symlink)
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        """Run data through the 'decode' filters without writing it."""
        return self._filter("decode", filename, data)
    def transaction(self):
        """Open a store transaction and return it, or nest into a
        transaction that is already running.

        Saves the dirstate and current branch so the transaction can be
        rolled back, and aborts if an abandoned journal is found.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # nested call: reuse the running transaction
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # NOTE(review): aftertrans(renames) (defined elsewhere in this
        # module) appears to perform the journal.* -> undo.* renames on
        # successful close -- confirm at its definition
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # hold only a weak reference so the transaction is not kept
        # alive by the repository itself
        self._transref = weakref.ref(tr)
        return tr
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True if a journal was found and rolled back, else False.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                # drop all cached state now that the store changed
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
    def rollback(self):
        """Undo the last committed transaction, restoring the saved
        dirstate and branch."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                # drop all cached state and notify interested parties
                # that history has been destroyed
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)
617 617 def invalidate(self):
618 618 for a in "changelog manifest".split():
619 619 if a in self.__dict__:
620 620 delattr(self, a)
621 621 self._tags = None
622 622 self._tagtypes = None
623 623 self.nodetagscache = None
624 624 self._branchcache = None # in UTF-8
625 625 self._branchcachetip = None
626 626
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* and return the lock object.

        If wait is false, raise error.LockHeld immediately when the
        lock is taken; otherwise warn and retry with the configured
        ui.timeout.  acquirefn, if given, is called once the lock is
        held; releasefn is invoked by the lock on release.
        """
        try:
            # first try a non-blocking acquisition
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-entrant acquisition: bump the existing lock's count
            l.lock()
            return l

        # invalidate caches on acquisition: another process may have
        # modified the store while we were not holding the lock
        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant acquisition: bump the existing lock's count
            l.lock()
            return l

        # re-read the dirstate on acquisition, write it back on release
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node for the file (the new one if a
        revision was added, otherwise the reused first-parent node).
        Appends fname to changelist when a revision was added or when
        only the flags changed during a merge.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                # drop the redundant ancestor parent
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            # record visited directories so explicit directory patterns
            # can be validated below
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            # a merge (p2 set) must be committed in full
            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed (and not closing a branch, merging, or
            # switching named branch): nothing to commit
            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        Returns the new changeset node.  When error is true, an OSError
        or IOError while committing a file aborts the commit; otherwise
        the file is silently treated as removed.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # let pretxncommit hooks see the pending changelog data
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                # refresh the branch cache while we still hold the lock
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        # (the empty dicts mean we only care about the cache side effect)
        tags_.findglobaltags(self.ui, self, {}, {})
934 934 def walk(self, match, node=None):
935 935 '''
936 936 walk recursively through the directory tree or a given
937 937 changeset, finding all files matched by the match
938 938 function
939 939 '''
940 940 return self[node].walk(match)
941 941
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag argument is True.
        """

        def mfmatches(ctx):
            # copy a context's manifest restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # accept either changectx objects or anything self[...] can resolve
        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # warn about bad paths only when they are not part of ctx1,
                # since files from ctx1 may legitimately be gone from disk
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # walk mf2, classifying each file against mf1; entries left in
            # mf1 afterwards are the removed files
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1047 1047
1048 1048 def add(self, list):
1049 1049 wlock = self.wlock()
1050 1050 try:
1051 1051 rejected = []
1052 1052 for f in list:
1053 1053 p = self.wjoin(f)
1054 1054 try:
1055 1055 st = os.lstat(p)
1056 1056 except:
1057 1057 self.ui.warn(_("%s does not exist!\n") % f)
1058 1058 rejected.append(f)
1059 1059 continue
1060 1060 if st.st_size > 10000000:
1061 1061 self.ui.warn(_("%s: files over 10MB may cause memory and"
1062 1062 " performance problems\n"
1063 1063 "(use 'hg revert %s' to unadd the file)\n")
1064 1064 % (f, f))
1065 1065 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1066 1066 self.ui.warn(_("%s not added: only files and symlinks "
1067 1067 "supported currently\n") % f)
1068 1068 rejected.append(p)
1069 1069 elif self.dirstate[f] in 'amn':
1070 1070 self.ui.warn(_("%s already tracked!\n") % f)
1071 1071 elif self.dirstate[f] == 'r':
1072 1072 self.dirstate.normallookup(f)
1073 1073 else:
1074 1074 self.dirstate.add(f)
1075 1075 return rejected
1076 1076 finally:
1077 1077 wlock.release()
1078 1078
1079 1079 def forget(self, list):
1080 1080 wlock = self.wlock()
1081 1081 try:
1082 1082 for f in list:
1083 1083 if self.dirstate[f] != 'a':
1084 1084 self.ui.warn(_("%s not added!\n") % f)
1085 1085 else:
1086 1086 self.dirstate.forget(f)
1087 1087 finally:
1088 1088 wlock.release()
1089 1089
1090 1090 def remove(self, list, unlink=False):
1091 1091 if unlink:
1092 1092 for f in list:
1093 1093 try:
1094 1094 util.unlink(self.wjoin(f))
1095 1095 except OSError, inst:
1096 1096 if inst.errno != errno.ENOENT:
1097 1097 raise
1098 1098 wlock = self.wlock()
1099 1099 try:
1100 1100 for f in list:
1101 1101 if unlink and os.path.exists(self.wjoin(f)):
1102 1102 self.ui.warn(_("%s still exists!\n") % f)
1103 1103 elif self.dirstate[f] == 'a':
1104 1104 self.dirstate.forget(f)
1105 1105 elif f not in self.dirstate:
1106 1106 self.ui.warn(_("%s not tracked!\n") % f)
1107 1107 else:
1108 1108 self.dirstate.remove(f)
1109 1109 finally:
1110 1110 wlock.release()
1111 1111
1112 1112 def undelete(self, list):
1113 1113 manifests = [self.manifest.read(self.changelog.read(p)[0])
1114 1114 for p in self.dirstate.parents() if p != nullid]
1115 1115 wlock = self.wlock()
1116 1116 try:
1117 1117 for f in list:
1118 1118 if self.dirstate[f] != 'r':
1119 1119 self.ui.warn(_("%s not removed!\n") % f)
1120 1120 else:
1121 1121 m = f in manifests[0] and manifests[0] or manifests[1]
1122 1122 t = self.file(f).read(m[f])
1123 1123 self.wwrite(f, t, m.flags(f))
1124 1124 self.dirstate.normal(f)
1125 1125 finally:
1126 1126 wlock.release()
1127 1127
1128 1128 def copy(self, source, dest):
1129 1129 p = self.wjoin(dest)
1130 1130 if not (os.path.exists(p) or os.path.islink(p)):
1131 1131 self.ui.warn(_("%s does not exist!\n") % dest)
1132 1132 elif not (os.path.isfile(p) or os.path.islink(p)):
1133 1133 self.ui.warn(_("copy failed: %s is not a file or a "
1134 1134 "symbolic link\n") % dest)
1135 1135 else:
1136 1136 wlock = self.wlock()
1137 1137 try:
1138 1138 if self.dirstate[dest] in '?r':
1139 1139 self.dirstate.add(dest)
1140 1140 self.dirstate.copy(source, dest)
1141 1141 finally:
1142 1142 wlock.release()
1143 1143
1144 1144 def heads(self, start=None):
1145 1145 heads = self.changelog.heads(start)
1146 1146 # sort the output in rev descending order
1147 1147 heads = [(-self.changelog.rev(h), h) for h in heads]
1148 1148 return [n for (r, n) in sorted(heads)]
1149 1149
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            # drop heads whose changeset extra dict (field 5 of
            # changelog.read()) carries a 'close' marker
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
1173 1173
1174 1174 def branches(self, nodes):
1175 1175 if not nodes:
1176 1176 nodes = [self.changelog.tip()]
1177 1177 b = []
1178 1178 for n in nodes:
1179 1179 t = n
1180 1180 while 1:
1181 1181 p = self.changelog.parents(n)
1182 1182 if p[1] != nullid or p[0] == nullid:
1183 1183 b.append((t, n, p[0], p[1]))
1184 1184 break
1185 1185 n = p[0]
1186 1186 return b
1187 1187
1188 1188 def between(self, pairs):
1189 1189 r = []
1190 1190
1191 1191 for top, bottom in pairs:
1192 1192 n, l, i = top, [], 0
1193 1193 f = 1
1194 1194
1195 1195 while n != bottom and n != nullid:
1196 1196 p = self.changelog.parents(n)[0]
1197 1197 if i == f:
1198 1198 l.append(n)
1199 1199 f = f * 2
1200 1200 n = p
1201 1201 i += 1
1202 1202
1203 1203 r.append(l)
1204 1204
1205 1205 return r
1206 1206
1207 1207 def findincoming(self, remote, base=None, heads=None, force=False):
1208 1208 """Return list of roots of the subsets of missing nodes from remote
1209 1209
1210 1210 If base dict is specified, assume that these nodes and their parents
1211 1211 exist on the remote side and that no child of a node of base exists
1212 1212 in both remote and self.
1213 1213 Furthermore base will be updated to include the nodes that exists
1214 1214 in self and remote but no children exists in self and remote.
1215 1215 If a list of heads is specified, return only nodes which are heads
1216 1216 or ancestors of these heads.
1217 1217
1218 1218 All the ancestors of base are in self and in remote.
1219 1219 All the descendants of the list returned are missing in self.
1220 1220 (and so we know that the rest of the nodes are missing in remote, see
1221 1221 outgoing)
1222 1222 """
1223 1223 return self.findcommonincoming(remote, base, heads, force)[1]
1224 1224
1225 1225 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1226 1226 """Return a tuple (common, missing roots, heads) used to identify
1227 1227 missing nodes from remote.
1228 1228
1229 1229 If base dict is specified, assume that these nodes and their parents
1230 1230 exist on the remote side and that no child of a node of base exists
1231 1231 in both remote and self.
1232 1232 Furthermore base will be updated to include the nodes that exists
1233 1233 in self and remote but no children exists in self and remote.
1234 1234 If a list of heads is specified, return only nodes which are heads
1235 1235 or ancestors of these heads.
1236 1236
1237 1237 All the ancestors of base are in self and in remote.
1238 1238 """
1239 1239 m = self.changelog.nodemap
1240 1240 search = []
1241 1241 fetch = set()
1242 1242 seen = set()
1243 1243 seenbranch = set()
1244 1244 if base is None:
1245 1245 base = {}
1246 1246
1247 1247 if not heads:
1248 1248 heads = remote.heads()
1249 1249
1250 1250 if self.changelog.tip() == nullid:
1251 1251 base[nullid] = 1
1252 1252 if heads != [nullid]:
1253 1253 return [nullid], [nullid], list(heads)
1254 1254 return [nullid], [], []
1255 1255
1256 1256 # assume we're closer to the tip than the root
1257 1257 # and start by examining the heads
1258 1258 self.ui.status(_("searching for changes\n"))
1259 1259
1260 1260 unknown = []
1261 1261 for h in heads:
1262 1262 if h not in m:
1263 1263 unknown.append(h)
1264 1264 else:
1265 1265 base[h] = 1
1266 1266
1267 1267 heads = unknown
1268 1268 if not unknown:
1269 1269 return base.keys(), [], []
1270 1270
1271 1271 req = set(unknown)
1272 1272 reqcnt = 0
1273 1273
1274 1274 # search through remote branches
1275 1275 # a 'branch' here is a linear segment of history, with four parts:
1276 1276 # head, root, first parent, second parent
1277 1277 # (a branch always has two parents (or none) by definition)
1278 1278 unknown = remote.branches(unknown)
1279 1279 while unknown:
1280 1280 r = []
1281 1281 while unknown:
1282 1282 n = unknown.pop(0)
1283 1283 if n[0] in seen:
1284 1284 continue
1285 1285
1286 1286 self.ui.debug("examining %s:%s\n"
1287 1287 % (short(n[0]), short(n[1])))
1288 1288 if n[0] == nullid: # found the end of the branch
1289 1289 pass
1290 1290 elif n in seenbranch:
1291 1291 self.ui.debug("branch already found\n")
1292 1292 continue
1293 1293 elif n[1] and n[1] in m: # do we know the base?
1294 1294 self.ui.debug("found incomplete branch %s:%s\n"
1295 1295 % (short(n[0]), short(n[1])))
1296 1296 search.append(n[0:2]) # schedule branch range for scanning
1297 1297 seenbranch.add(n)
1298 1298 else:
1299 1299 if n[1] not in seen and n[1] not in fetch:
1300 1300 if n[2] in m and n[3] in m:
1301 1301 self.ui.debug("found new changeset %s\n" %
1302 1302 short(n[1]))
1303 1303 fetch.add(n[1]) # earliest unknown
1304 1304 for p in n[2:4]:
1305 1305 if p in m:
1306 1306 base[p] = 1 # latest known
1307 1307
1308 1308 for p in n[2:4]:
1309 1309 if p not in req and p not in m:
1310 1310 r.append(p)
1311 1311 req.add(p)
1312 1312 seen.add(n[0])
1313 1313
1314 1314 if r:
1315 1315 reqcnt += 1
1316 1316 self.ui.debug("request %d: %s\n" %
1317 1317 (reqcnt, " ".join(map(short, r))))
1318 1318 for p in xrange(0, len(r), 10):
1319 1319 for b in remote.branches(r[p:p+10]):
1320 1320 self.ui.debug("received %s:%s\n" %
1321 1321 (short(b[0]), short(b[1])))
1322 1322 unknown.append(b)
1323 1323
1324 1324 # do binary search on the branches we found
1325 1325 while search:
1326 1326 newsearch = []
1327 1327 reqcnt += 1
1328 1328 for n, l in zip(search, remote.between(search)):
1329 1329 l.append(n[1])
1330 1330 p = n[0]
1331 1331 f = 1
1332 1332 for i in l:
1333 1333 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1334 1334 if i in m:
1335 1335 if f <= 2:
1336 1336 self.ui.debug("found new branch changeset %s\n" %
1337 1337 short(p))
1338 1338 fetch.add(p)
1339 1339 base[i] = 1
1340 1340 else:
1341 1341 self.ui.debug("narrowed branch search to %s:%s\n"
1342 1342 % (short(p), short(i)))
1343 1343 newsearch.append((p, i))
1344 1344 break
1345 1345 p, f = i, f * 2
1346 1346 search = newsearch
1347 1347
1348 1348 # sanity check our fetch list
1349 1349 for f in fetch:
1350 1350 if f in m:
1351 1351 raise error.RepoError(_("already have changeset ")
1352 1352 + short(f[:4]))
1353 1353
1354 1354 if base.keys() == [nullid]:
1355 1355 if force:
1356 1356 self.ui.warn(_("warning: repository is unrelated\n"))
1357 1357 else:
1358 1358 raise util.Abort(_("repository is unrelated"))
1359 1359
1360 1360 self.ui.debug("found new changesets starting at " +
1361 1361 " ".join([short(f) for f in fetch]) + "\n")
1362 1362
1363 1363 self.ui.debug("%d total queries\n" % reqcnt)
1364 1364
1365 1365 return base.keys(), list(fetch), heads
1366 1366
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            # no common-node hint supplied: run incoming discovery to
            # populate base with the nodes shared with the remote
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            # breadth-first walk from the common nodes through their
            # ancestors, dropping each from the candidate set
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset
1414 1414
    def pull(self, remote, heads=None, force=False):
        '''Pull changes from *remote* into this repository.

        heads: optional list of remote heads to limit the pull to.
        force: also pull when the repositories appear unrelated.

        Returns 0 when there is nothing to fetch, otherwise the result of
        addchangegroup for the received changegroup.
        '''
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                # local repo is empty: everything will be transferred
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                # pulling a subset requires server-side changegroupsubset
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()
1441 1441
1442 1442 def push(self, remote, force=False, revs=None):
1443 1443 # there are two ways to push to remote repo:
1444 1444 #
1445 1445 # addchangegroup assumes local user can lock remote
1446 1446 # repo (local filesystem, old ssh servers).
1447 1447 #
1448 1448 # unbundle assumes local user cannot lock remote repo (new ssh
1449 1449 # servers, http servers).
1450 1450
1451 1451 if remote.capable('unbundle'):
1452 1452 return self.push_unbundle(remote, force, revs)
1453 1453 return self.push_addchangegroup(remote, force, revs)
1454 1454
    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.

        When nothing should be sent, the changegroup slot is None and the
        second element is a status code (1: no changes, 0: aborted because
        the push would create new remote heads/branches without --force).
        '''
        common = {}
        remote_heads = remote.heads()
        # inc is truthy when the remote has changes we do not have
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                   self[updatelb[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    # per-branch head check using the remote's branchmap
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        # group only the outgoing heads by branch name
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelb = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelb:
                            continue
                        if not checkbranch(lheads, rheads, updatelb):
                            return None, 0
                else:
                    # remote cannot report branches: check all heads at once
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1571 1572
1572 1573 def push_addchangegroup(self, remote, force, revs):
1573 1574 lock = remote.lock()
1574 1575 try:
1575 1576 ret = self.prepush(remote, force, revs)
1576 1577 if ret[0] is not None:
1577 1578 cg, remote_heads = ret
1578 1579 return remote.addchangegroup(cg, 'push', self.url())
1579 1580 return ret[1]
1580 1581 finally:
1581 1582 lock.release()
1582 1583
1583 1584 def push_unbundle(self, remote, force, revs):
1584 1585 # local repo finds heads on server, finds out what revs it
1585 1586 # must push. once revs transferred, if server finds it has
1586 1587 # different heads (someone else won commit/push race), server
1587 1588 # aborts.
1588 1589
1589 1590 ret = self.prepush(remote, force, revs)
1590 1591 if ret[0] is not None:
1591 1592 cg, remote_heads = ret
1592 1593 if force: remote_heads = ['force']
1593 1594 return remote.unbundle(cg, remote_heads, 'push')
1594 1595 return ret[1]
1595 1596
1596 1597 def changegroupinfo(self, nodes, source):
1597 1598 if self.ui.verbose or source == 'bundle':
1598 1599 self.ui.status(_("%d changesets found\n") % len(nodes))
1599 1600 if self.ui.debugflag:
1600 1601 self.ui.debug("list of changesets:\n")
1601 1602 for node in nodes:
1602 1603 self.ui.debug("%s\n" % hex(node))
1603 1604
1604 1605 def changegroupsubset(self, bases, heads, source, extranodes=None):
1605 1606 """Compute a changegroup consisting of all the nodes that are
1606 1607 descendents of any of the bases and ancestors of any of the heads.
1607 1608 Return a chunkbuffer object whose read() method will return
1608 1609 successive changegroup chunks.
1609 1610
1610 1611 It is fairly complex as determining which filenodes and which
1611 1612 manifest nodes need to be included for the changeset to be complete
1612 1613 is non-trivial.
1613 1614
1614 1615 Another wrinkle is doing the reverse, figuring out which changeset in
1615 1616 the changegroup a particular filenode or manifestnode belongs to.
1616 1617
1617 1618 The caller can specify some nodes that must be included in the
1618 1619 changegroup using the extranodes argument. It should be a dict
1619 1620 where the keys are the filenames (or 1 for the manifest), and the
1620 1621 values are lists of (node, linknode) tuples, where node is a wanted
1621 1622 node and linknode is the changelog node that should be transmitted as
1622 1623 the linkrev.
1623 1624 """
1624 1625
1626 # Set up some initial variables
1627 # Make it easy to refer to self.changelog
1628 cl = self.changelog
1629 # msng is short for missing - compute the list of changesets in this
1630 # changegroup.
1631 if not bases:
1632 bases = [nullid]
1633 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1634
1625 1635 if extranodes is None:
1626 1636 # can we go through the fast path ?
1627 1637 heads.sort()
1628 1638 allheads = self.heads()
1629 1639 allheads.sort()
1630 1640 if heads == allheads:
1631 common = []
1632 # parents of bases are known from both sides
1633 for n in bases:
1634 for p in self.changelog.parents(n):
1635 if p != nullid:
1636 common.append(p)
1637 return self._changegroup(common, source)
1641 return self._changegroup(msng_cl_lst, source)
1638 1642
1643 # slow path
1639 1644 self.hook('preoutgoing', throw=True, source=source)
1640 1645
1641 # Set up some initial variables
1642 # Make it easy to refer to self.changelog
1643 cl = self.changelog
1644 # msng is short for missing - compute the list of changesets in this
1645 # changegroup.
1646 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1647 1646 self.changegroupinfo(msng_cl_lst, source)
1648 1647 # Some bases may turn out to be superfluous, and some heads may be
1649 1648 # too. nodesbetween will return the minimal set of bases and heads
1650 1649 # necessary to re-create the changegroup.
1651 1650
1652 1651 # Known heads are the list of heads that it is assumed the recipient
1653 1652 # of this changegroup will know about.
1654 1653 knownheads = set()
1655 1654 # We assume that all parents of bases are known heads.
1656 1655 for n in bases:
1657 1656 knownheads.update(cl.parents(n))
1658 1657 knownheads.discard(nullid)
1659 1658 knownheads = list(knownheads)
1660 1659 if knownheads:
1661 1660 # Now that we know what heads are known, we can compute which
1662 1661 # changesets are known. The recipient must know about all
1663 1662 # changesets required to reach the known heads from the null
1664 1663 # changeset.
1665 1664 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1666 1665 junk = None
1667 1666 # Transform the list into a set.
1668 1667 has_cl_set = set(has_cl_set)
1669 1668 else:
1670 1669 # If there were no known heads, the recipient cannot be assumed to
1671 1670 # know about any changesets.
1672 1671 has_cl_set = set()
1673 1672
1674 1673 # Make it easy to refer to self.manifest
1675 1674 mnfst = self.manifest
1676 1675 # We don't know which manifests are missing yet
1677 1676 msng_mnfst_set = {}
1678 1677 # Nor do we know which filenodes are missing.
1679 1678 msng_filenode_set = {}
1680 1679
1681 1680 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1682 1681 junk = None
1683 1682
1684 1683 # A changeset always belongs to itself, so the changenode lookup
1685 1684 # function for a changenode is identity.
1686 1685 def identity(x):
1687 1686 return x
1688 1687
1689 1688 # If we determine that a particular file or manifest node must be a
1690 1689 # node that the recipient of the changegroup will already have, we can
1691 1690 # also assume the recipient will have all the parents. This function
1692 1691 # prunes them from the set of missing nodes.
1693 1692 def prune_parents(revlog, hasset, msngset):
1694 1693 haslst = list(hasset)
1695 1694 haslst.sort(key=revlog.rev)
1696 1695 for node in haslst:
1697 1696 parentlst = [p for p in revlog.parents(node) if p != nullid]
1698 1697 while parentlst:
1699 1698 n = parentlst.pop()
1700 1699 if n not in hasset:
1701 1700 hasset.add(n)
1702 1701 p = [p for p in revlog.parents(n) if p != nullid]
1703 1702 parentlst.extend(p)
1704 1703 for n in hasset:
1705 1704 msngset.pop(n, None)
1706 1705
1707 1706 # This is a function generating function used to set up an environment
1708 1707 # for the inner function to execute in.
1709 1708 def manifest_and_file_collector(changedfileset):
1710 1709 # This is an information gathering function that gathers
1711 1710 # information from each changeset node that goes out as part of
1712 1711 # the changegroup. The information gathered is a list of which
1713 1712 # manifest nodes are potentially required (the recipient may
1714 1713 # already have them) and total list of all files which were
1715 1714 # changed in any changeset in the changegroup.
1716 1715 #
1717 1716 # We also remember the first changenode we saw any manifest
1718 1717 # referenced by so we can later determine which changenode 'owns'
1719 1718 # the manifest.
1720 1719 def collect_manifests_and_files(clnode):
1721 1720 c = cl.read(clnode)
1722 1721 for f in c[3]:
1723 1722 # This is to make sure we only have one instance of each
1724 1723 # filename string for each filename.
1725 1724 changedfileset.setdefault(f, f)
1726 1725 msng_mnfst_set.setdefault(c[0], clnode)
1727 1726 return collect_manifests_and_files
1728 1727
1729 1728 # Figure out which manifest nodes (of the ones we think might be part
1730 1729 # of the changegroup) the recipient must know about and remove them
1731 1730 # from the changegroup.
1732 1731 def prune_manifests():
1733 1732 has_mnfst_set = set()
1734 1733 for n in msng_mnfst_set:
1735 1734 # If a 'missing' manifest thinks it belongs to a changenode
1736 1735 # the recipient is assumed to have, obviously the recipient
1737 1736 # must have that manifest.
1738 1737 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1739 1738 if linknode in has_cl_set:
1740 1739 has_mnfst_set.add(n)
1741 1740 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1742 1741
1743 1742 # Use the information collected in collect_manifests_and_files to say
1744 1743 # which changenode any manifestnode belongs to.
1745 1744 def lookup_manifest_link(mnfstnode):
1746 1745 return msng_mnfst_set[mnfstnode]
1747 1746
1748 1747 # A function generating function that sets up the initial environment
1749 1748 # the inner function.
1750 1749 def filenode_collector(changedfiles):
1751 1750 next_rev = [0]
1752 1751 # This gathers information from each manifestnode included in the
1753 1752 # changegroup about which filenodes the manifest node references
1754 1753 # so we can include those in the changegroup too.
1755 1754 #
1756 1755 # It also remembers which changenode each filenode belongs to. It
1757 1756 # does this by assuming the a filenode belongs to the changenode
1758 1757 # the first manifest that references it belongs to.
1759 1758 def collect_msng_filenodes(mnfstnode):
1760 1759 r = mnfst.rev(mnfstnode)
1761 1760 if r == next_rev[0]:
1762 1761 # If the last rev we looked at was the one just previous,
1763 1762 # we only need to see a diff.
1764 1763 deltamf = mnfst.readdelta(mnfstnode)
1765 1764 # For each line in the delta
1766 1765 for f, fnode in deltamf.iteritems():
1767 1766 f = changedfiles.get(f, None)
1768 1767 # And if the file is in the list of files we care
1769 1768 # about.
1770 1769 if f is not None:
1771 1770 # Get the changenode this manifest belongs to
1772 1771 clnode = msng_mnfst_set[mnfstnode]
1773 1772 # Create the set of filenodes for the file if
1774 1773 # there isn't one already.
1775 1774 ndset = msng_filenode_set.setdefault(f, {})
1776 1775 # And set the filenode's changelog node to the
1777 1776 # manifest's if it hasn't been set already.
1778 1777 ndset.setdefault(fnode, clnode)
1779 1778 else:
1780 1779 # Otherwise we need a full manifest.
1781 1780 m = mnfst.read(mnfstnode)
1782 1781 # For every file in we care about.
1783 1782 for f in changedfiles:
1784 1783 fnode = m.get(f, None)
1785 1784 # If it's in the manifest
1786 1785 if fnode is not None:
1787 1786 # See comments above.
1788 1787 clnode = msng_mnfst_set[mnfstnode]
1789 1788 ndset = msng_filenode_set.setdefault(f, {})
1790 1789 ndset.setdefault(fnode, clnode)
1791 1790 # Remember the revision we hope to see next.
1792 1791 next_rev[0] = r + 1
1793 1792 return collect_msng_filenodes
1794 1793
1795 1794 # We have a list of filenodes we think we need for a file, lets remove
1796 1795 # all those we know the recipient must have.
1797 1796 def prune_filenodes(f, filerevlog):
1798 1797 msngset = msng_filenode_set[f]
1799 1798 hasset = set()
1800 1799 # If a 'missing' filenode thinks it belongs to a changenode we
1801 1800 # assume the recipient must have, then the recipient must have
1802 1801 # that filenode.
1803 1802 for n in msngset:
1804 1803 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1805 1804 if clnode in has_cl_set:
1806 1805 hasset.add(n)
1807 1806 prune_parents(filerevlog, hasset, msngset)
1808 1807
1809 1808 # A function generator function that sets up the a context for the
1810 1809 # inner function.
1811 1810 def lookup_filenode_link_func(fname):
1812 1811 msngset = msng_filenode_set[fname]
1813 1812 # Lookup the changenode the filenode belongs to.
1814 1813 def lookup_filenode_link(fnode):
1815 1814 return msngset[fnode]
1816 1815 return lookup_filenode_link
1817 1816
1818 1817 # Add the nodes that were explicitly requested.
1819 1818 def add_extra_nodes(name, nodes):
1820 1819 if not extranodes or name not in extranodes:
1821 1820 return
1822 1821
1823 1822 for node, linknode in extranodes[name]:
1824 1823 if node not in nodes:
1825 1824 nodes[node] = linknode
1826 1825
1827 1826 # Now that we have all theses utility functions to help out and
1828 1827 # logically divide up the task, generate the group.
1829 1828 def gengroup():
1830 1829 # The set of changed files starts empty.
1831 1830 changedfiles = {}
1832 1831 # Create a changenode group generator that will call our functions
1833 1832 # back to lookup the owning changenode and collect information.
1834 1833 group = cl.group(msng_cl_lst, identity,
1835 1834 manifest_and_file_collector(changedfiles))
1836 1835 for chnk in group:
1837 1836 yield chnk
1838 1837
1839 1838 # The list of manifests has been collected by the generator
1840 1839 # calling our functions back.
1841 1840 prune_manifests()
1842 1841 add_extra_nodes(1, msng_mnfst_set)
1843 1842 msng_mnfst_lst = msng_mnfst_set.keys()
1844 1843 # Sort the manifestnodes by revision number.
1845 1844 msng_mnfst_lst.sort(key=mnfst.rev)
1846 1845 # Create a generator for the manifestnodes that calls our lookup
1847 1846 # and data collection functions back.
1848 1847 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1849 1848 filenode_collector(changedfiles))
1850 1849 for chnk in group:
1851 1850 yield chnk
1852 1851
1853 1852 # These are no longer needed, dereference and toss the memory for
1854 1853 # them.
1855 1854 msng_mnfst_lst = None
1856 1855 msng_mnfst_set.clear()
1857 1856
1858 1857 if extranodes:
1859 1858 for fname in extranodes:
1860 1859 if isinstance(fname, int):
1861 1860 continue
1862 1861 msng_filenode_set.setdefault(fname, {})
1863 1862 changedfiles[fname] = 1
1864 1863 # Go through all our files in order sorted by name.
1865 1864 for fname in sorted(changedfiles):
1866 1865 filerevlog = self.file(fname)
1867 1866 if not len(filerevlog):
1868 1867 raise util.Abort(_("empty or missing revlog for %s") % fname)
1869 1868 # Toss out the filenodes that the recipient isn't really
1870 1869 # missing.
1871 1870 if fname in msng_filenode_set:
1872 1871 prune_filenodes(fname, filerevlog)
1873 1872 add_extra_nodes(fname, msng_filenode_set[fname])
1874 1873 msng_filenode_lst = msng_filenode_set[fname].keys()
1875 1874 else:
1876 1875 msng_filenode_lst = []
1877 1876 # If any filenodes are left, generate the group for them,
1878 1877 # otherwise don't bother.
1879 1878 if len(msng_filenode_lst) > 0:
1880 1879 yield changegroup.chunkheader(len(fname))
1881 1880 yield fname
1882 1881 # Sort the filenodes by their revision #
1883 1882 msng_filenode_lst.sort(key=filerevlog.rev)
1884 1883 # Create a group generator and only pass in a changenode
1885 1884 # lookup function as we need to collect no information
1886 1885 # from filenodes.
1887 1886 group = filerevlog.group(msng_filenode_lst,
1888 1887 lookup_filenode_link_func(fname))
1889 1888 for chnk in group:
1890 1889 yield chnk
1891 1890 if fname in msng_filenode_set:
1892 1891 # Don't need this anymore, toss it to free memory.
1893 1892 del msng_filenode_set[fname]
1894 1893 # Signal that no more groups are left.
1895 1894 yield changegroup.closechunk()
1896 1895
1897 1896 if msng_cl_lst:
1898 1897 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1899 1898
1900 1899 return util.chunkbuffer(gengroup())
1901 1900
1902 1901 def changegroup(self, basenodes, source):
1903 1902 # to avoid a race we use changegroupsubset() (issue1320)
1904 1903 return self.changegroupsubset(basenodes, self.heads(), source)
1905 1904
1906 def _changegroup(self, common, source):
1905 def _changegroup(self, nodes, source):
1907 1906 """Compute the changegroup of all nodes that we have that a recipient
1908 1907 doesn't. Return a chunkbuffer object whose read() method will return
1909 1908 successive changegroup chunks.
1910 1909
1911 1910 This is much easier than the previous function as we can assume that
1912 1911 the recipient has any changenode we aren't sending them.
1913 1912
1914 common is the set of common nodes between remote and self"""
1913 nodes is the set of nodes to send"""
1915 1914
1916 1915 self.hook('preoutgoing', throw=True, source=source)
1917 1916
1918 1917 cl = self.changelog
1919 nodes = cl.findmissing(common)
1920 1918 revset = set([cl.rev(n) for n in nodes])
1921 1919 self.changegroupinfo(nodes, source)
1922 1920
1923 1921 def identity(x):
1924 1922 return x
1925 1923
1926 1924 def gennodelst(log):
1927 1925 for r in log:
1928 1926 if log.linkrev(r) in revset:
1929 1927 yield log.node(r)
1930 1928
1931 1929 def changed_file_collector(changedfileset):
1932 1930 def collect_changed_files(clnode):
1933 1931 c = cl.read(clnode)
1934 1932 changedfileset.update(c[3])
1935 1933 return collect_changed_files
1936 1934
1937 1935 def lookuprevlink_func(revlog):
1938 1936 def lookuprevlink(n):
1939 1937 return cl.node(revlog.linkrev(revlog.rev(n)))
1940 1938 return lookuprevlink
1941 1939
1942 1940 def gengroup():
1943 1941 '''yield a sequence of changegroup chunks (strings)'''
1944 1942 # construct a list of all changed files
1945 1943 changedfiles = set()
1946 1944
1947 1945 for chnk in cl.group(nodes, identity,
1948 1946 changed_file_collector(changedfiles)):
1949 1947 yield chnk
1950 1948
1951 1949 mnfst = self.manifest
1952 1950 nodeiter = gennodelst(mnfst)
1953 1951 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1954 1952 yield chnk
1955 1953
1956 1954 for fname in sorted(changedfiles):
1957 1955 filerevlog = self.file(fname)
1958 1956 if not len(filerevlog):
1959 1957 raise util.Abort(_("empty or missing revlog for %s") % fname)
1960 1958 nodeiter = gennodelst(filerevlog)
1961 1959 nodeiter = list(nodeiter)
1962 1960 if nodeiter:
1963 1961 yield changegroup.chunkheader(len(fname))
1964 1962 yield fname
1965 1963 lookup = lookuprevlink_func(filerevlog)
1966 1964 for chnk in filerevlog.group(nodeiter, lookup):
1967 1965 yield chnk
1968 1966
1969 1967 yield changegroup.closechunk()
1970 1968
1971 1969 if nodes:
1972 1970 self.hook('outgoing', node=hex(nodes[0]), source=source)
1973 1971
1974 1972 return util.chunkbuffer(gengroup())
1975 1973
1976 1974 def addchangegroup(self, source, srctype, url, emptyok=False):
1977 1975 """add changegroup to repo.
1978 1976
1979 1977 return values:
1980 1978 - nothing changed or no source: 0
1981 1979 - more heads than before: 1+added heads (2..n)
1982 1980 - less heads than before: -1-removed heads (-2..-n)
1983 1981 - number of heads stays the same: 1
1984 1982 """
1985 1983 def csmap(x):
1986 1984 self.ui.debug("add changeset %s\n" % short(x))
1987 1985 return len(cl)
1988 1986
1989 1987 def revmap(x):
1990 1988 return cl.rev(x)
1991 1989
1992 1990 if not source:
1993 1991 return 0
1994 1992
1995 1993 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1996 1994
1997 1995 changesets = files = revisions = 0
1998 1996
1999 1997 # write changelog data to temp files so concurrent readers will not see
2000 1998 # inconsistent view
2001 1999 cl = self.changelog
2002 2000 cl.delayupdate()
2003 2001 oldheads = len(cl.heads())
2004 2002
2005 2003 tr = self.transaction()
2006 2004 try:
2007 2005 trp = weakref.proxy(tr)
2008 2006 # pull off the changeset group
2009 2007 self.ui.status(_("adding changesets\n"))
2010 2008 clstart = len(cl)
2011 2009 chunkiter = changegroup.chunkiter(source)
2012 2010 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2013 2011 raise util.Abort(_("received changelog group is empty"))
2014 2012 clend = len(cl)
2015 2013 changesets = clend - clstart
2016 2014
2017 2015 # pull off the manifest group
2018 2016 self.ui.status(_("adding manifests\n"))
2019 2017 chunkiter = changegroup.chunkiter(source)
2020 2018 # no need to check for empty manifest group here:
2021 2019 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2022 2020 # no new manifest will be created and the manifest group will
2023 2021 # be empty during the pull
2024 2022 self.manifest.addgroup(chunkiter, revmap, trp)
2025 2023
2026 2024 # process the files
2027 2025 self.ui.status(_("adding file changes\n"))
2028 2026 while 1:
2029 2027 f = changegroup.getchunk(source)
2030 2028 if not f:
2031 2029 break
2032 2030 self.ui.debug("adding %s revisions\n" % f)
2033 2031 fl = self.file(f)
2034 2032 o = len(fl)
2035 2033 chunkiter = changegroup.chunkiter(source)
2036 2034 if fl.addgroup(chunkiter, revmap, trp) is None:
2037 2035 raise util.Abort(_("received file revlog group is empty"))
2038 2036 revisions += len(fl) - o
2039 2037 files += 1
2040 2038
2041 2039 newheads = len(cl.heads())
2042 2040 heads = ""
2043 2041 if oldheads and newheads != oldheads:
2044 2042 heads = _(" (%+d heads)") % (newheads - oldheads)
2045 2043
2046 2044 self.ui.status(_("added %d changesets"
2047 2045 " with %d changes to %d files%s\n")
2048 2046 % (changesets, revisions, files, heads))
2049 2047
2050 2048 if changesets > 0:
2051 2049 p = lambda: cl.writepending() and self.root or ""
2052 2050 self.hook('pretxnchangegroup', throw=True,
2053 2051 node=hex(cl.node(clstart)), source=srctype,
2054 2052 url=url, pending=p)
2055 2053
2056 2054 # make changelog see real files again
2057 2055 cl.finalize(trp)
2058 2056
2059 2057 tr.close()
2060 2058 finally:
2061 2059 del tr
2062 2060
2063 2061 if changesets > 0:
2064 2062 # forcefully update the on-disk branch cache
2065 2063 self.ui.debug("updating the branch cache\n")
2066 2064 self.branchtags()
2067 2065 self.hook("changegroup", node=hex(cl.node(clstart)),
2068 2066 source=srctype, url=url)
2069 2067
2070 2068 for i in xrange(clstart, clend):
2071 2069 self.hook("incoming", node=hex(cl.node(i)),
2072 2070 source=srctype, url=url)
2073 2071
2074 2072 # never return 0 here:
2075 2073 if newheads < oldheads:
2076 2074 return newheads - oldheads - 1
2077 2075 else:
2078 2076 return newheads - oldheads + 1
2079 2077
2080 2078
2081 2079 def stream_in(self, remote):
2082 2080 fp = remote.stream_out()
2083 2081 l = fp.readline()
2084 2082 try:
2085 2083 resp = int(l)
2086 2084 except ValueError:
2087 2085 raise error.ResponseError(
2088 2086 _('Unexpected response from remote server:'), l)
2089 2087 if resp == 1:
2090 2088 raise util.Abort(_('operation forbidden by server'))
2091 2089 elif resp == 2:
2092 2090 raise util.Abort(_('locking the remote repository failed'))
2093 2091 elif resp != 0:
2094 2092 raise util.Abort(_('the server sent an unknown error code'))
2095 2093 self.ui.status(_('streaming all changes\n'))
2096 2094 l = fp.readline()
2097 2095 try:
2098 2096 total_files, total_bytes = map(int, l.split(' ', 1))
2099 2097 except (ValueError, TypeError):
2100 2098 raise error.ResponseError(
2101 2099 _('Unexpected response from remote server:'), l)
2102 2100 self.ui.status(_('%d files to transfer, %s of data\n') %
2103 2101 (total_files, util.bytecount(total_bytes)))
2104 2102 start = time.time()
2105 2103 for i in xrange(total_files):
2106 2104 # XXX doesn't support '\n' or '\r' in filenames
2107 2105 l = fp.readline()
2108 2106 try:
2109 2107 name, size = l.split('\0', 1)
2110 2108 size = int(size)
2111 2109 except (ValueError, TypeError):
2112 2110 raise error.ResponseError(
2113 2111 _('Unexpected response from remote server:'), l)
2114 2112 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2115 2113 # for backwards compat, name was partially encoded
2116 2114 ofp = self.sopener(store.decodedir(name), 'w')
2117 2115 for chunk in util.filechunkiter(fp, limit=size):
2118 2116 ofp.write(chunk)
2119 2117 ofp.close()
2120 2118 elapsed = time.time() - start
2121 2119 if elapsed <= 0:
2122 2120 elapsed = 0.001
2123 2121 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2124 2122 (util.bytecount(total_bytes), elapsed,
2125 2123 util.bytecount(total_bytes / elapsed)))
2126 2124 self.invalidate()
2127 2125 return len(self.heads()) + 1
2128 2126
2129 2127 def clone(self, remote, heads=[], stream=False):
2130 2128 '''clone remote repository.
2131 2129
2132 2130 keyword arguments:
2133 2131 heads: list of revs to clone (forces use of pull)
2134 2132 stream: use streaming clone if possible'''
2135 2133
2136 2134 # now, all clients that can request uncompressed clones can
2137 2135 # read repo formats supported by all servers that can serve
2138 2136 # them.
2139 2137
2140 2138 # if revlog format changes, client will have to check version
2141 2139 # and format flags on "stream" capability, and use
2142 2140 # uncompressed only if compatible.
2143 2141
2144 2142 if stream and not heads and remote.capable('stream'):
2145 2143 return self.stream_in(remote)
2146 2144 return self.pull(remote, heads)
2147 2145
2148 2146 # used to avoid circular references so destructors work
2149 2147 def aftertrans(files):
2150 2148 renamefiles = [tuple(t) for t in files]
2151 2149 def a():
2152 2150 for src, dest in renamefiles:
2153 2151 util.rename(src, dest)
2154 2152 return a
2155 2153
2156 2154 def instance(ui, path, create):
2157 2155 return localrepository(ui, util.drop_scheme('file', path), create)
2158 2156
2159 2157 def islocal(path):
2160 2158 return True
@@ -1,149 +1,168
1 1 #!/bin/sh
2 2
3 3 cp "$TESTDIR"/printenv.py .
4 4
5 5 echo "====== Setting up test"
6 6 hg init test
7 7 cd test
8 8 echo 0 > afile
9 9 hg add afile
10 10 hg commit -m "0.0" -d "1000000 0"
11 11 echo 1 >> afile
12 12 hg commit -m "0.1" -d "1000000 0"
13 13 echo 2 >> afile
14 14 hg commit -m "0.2" -d "1000000 0"
15 15 echo 3 >> afile
16 16 hg commit -m "0.3" -d "1000000 0"
17 17 hg update -C 0
18 18 echo 1 >> afile
19 19 hg commit -m "1.1" -d "1000000 0"
20 20 echo 2 >> afile
21 21 hg commit -m "1.2" -d "1000000 0"
22 22 echo "a line" > fred
23 23 echo 3 >> afile
24 24 hg add fred
25 25 hg commit -m "1.3" -d "1000000 0"
26 26 hg mv afile adifferentfile
27 27 hg commit -m "1.3m" -d "1000000 0"
28 28 hg update -C 3
29 29 hg mv afile anotherfile
30 30 hg commit -m "0.3m" -d "1000000 0"
31 31 hg verify
32 32 cd ..
33 33 hg init empty
34 34
35 35 echo "====== Bundle --all"
36 36 hg -R test bundle --all all.hg
37 37
38 38 echo "====== Bundle test to full.hg"
39 39 hg -R test bundle full.hg empty
40 40 echo "====== Unbundle full.hg in test"
41 41 hg -R test unbundle full.hg
42 42 echo "====== Verify empty"
43 43 hg -R empty heads
44 44 hg -R empty verify
45 45
46 46 echo "====== Pull full.hg into test (using --cwd)"
47 47 hg --cwd test pull ../full.hg
48 48 echo "====== Pull full.hg into empty (using --cwd)"
49 49 hg --cwd empty pull ../full.hg
50 50 echo "====== Rollback empty"
51 51 hg -R empty rollback
52 52 echo "====== Pull full.hg into empty again (using --cwd)"
53 53 hg --cwd empty pull ../full.hg
54 54
55 55 echo "====== Pull full.hg into test (using -R)"
56 56 hg -R test pull full.hg
57 57 echo "====== Pull full.hg into empty (using -R)"
58 58 hg -R empty pull full.hg
59 59 echo "====== Rollback empty"
60 60 hg -R empty rollback
61 61 echo "====== Pull full.hg into empty again (using -R)"
62 62 hg -R empty pull full.hg
63 63
64 64 echo "====== Log -R full.hg in fresh empty"
65 65 rm -r empty
66 66 hg init empty
67 67 cd empty
68 68 hg -R bundle://../full.hg log
69 69
70 70 echo "====== Pull ../full.hg into empty (with hook)"
71 71 echo '[hooks]' >> .hg/hgrc
72 72 echo 'changegroup = python ../printenv.py changegroup' >> .hg/hgrc
73 73 #doesn't work (yet ?)
74 74 #hg -R bundle://../full.hg verify
75 75 hg pull bundle://../full.hg
76 76 echo "====== Rollback empty"
77 77 hg rollback
78 78 cd ..
79 79 echo "====== Log -R bundle:empty+full.hg"
80 80 hg -R bundle:empty+full.hg log --template="{rev} "
81 81 echo ""
82 82 echo "====== Pull full.hg into empty again (using -R; with hook)"
83 83 hg -R empty pull full.hg
84 84
85 85 echo "====== Create partial clones"
86 86 rm -r empty
87 87 hg init empty
88 88 hg clone -r 3 test partial
89 89 hg clone partial partial2
90 90 cd partial
91 91 echo "====== Log -R full.hg in partial"
92 92 hg -R bundle://../full.hg log
93 93 echo "====== Incoming full.hg in partial"
94 94 hg incoming bundle://../full.hg
95 95 echo "====== Outgoing -R full.hg vs partial2 in partial"
96 96 hg -R bundle://../full.hg outgoing ../partial2
97 97 echo "====== Outgoing -R does-not-exist.hg vs partial2 in partial"
98 98 hg -R bundle://../does-not-exist.hg outgoing ../partial2
99 99 cd ..
100 100
101 101 echo "====== Direct clone from bundle (all-history)"
102 102 hg clone full.hg full-clone
103 103 hg -R full-clone heads
104 104 rm -r full-clone
105 105
106 106 # test for http://mercurial.selenic.com/bts/issue216
107 107 echo "====== Unbundle incremental bundles into fresh empty in one go"
108 108 rm -r empty
109 109 hg init empty
110 110 hg -R test bundle --base null -r 0 ../0.hg
111 111 hg -R test bundle --base 0 -r 1 ../1.hg
112 112 hg -R empty unbundle -u ../0.hg ../1.hg
113 113
114 114 # test for 540d1059c802
115 115 echo "====== test for 540d1059c802"
116 116 hg init orig
117 117 cd orig
118 118 echo foo > foo
119 119 hg add foo
120 120 hg ci -m 'add foo'
121 121
122 122 hg clone . ../copy
123 123 hg tag foo
124 124
125 125 cd ../copy
126 126 echo >> foo
127 127 hg ci -m 'change foo'
128 128 hg bundle ../bundle.hg ../orig
129 129
130 130 cd ../orig
131 131 hg incoming ../bundle.hg
132 132 cd ..
133 133
134 134 # test for http://mercurial.selenic.com/bts/issue1144
135 135 echo "===== test that verify bundle does not traceback"
136 136 # partial history bundle, fails w/ unkown parent
137 137 hg -R bundle.hg verify
138 138 # full history bundle, refuses to verify non-local repo
139 139 hg -R all.hg verify
140 140 # but, regular verify must continue to work
141 141 hg -R orig verify
142 142
143 143 echo "====== diff against bundle"
144 144 hg init b
145 145 cd b
146 146 hg -R ../all.hg diff -r tip
147 147 cd ..
148 148
149 echo "====== bundle single branch"
150 hg init branchy
151 cd branchy
152 echo a >a
153 hg ci -Ama
154 echo b >b
155 hg ci -Amb
156 echo b1 >b1
157 hg ci -Amb1
158 hg up 0
159 echo c >c
160 hg ci -Amc
161 echo c1 >c1
162 hg ci -Amc1
163 hg clone -q .#tip part
164 echo "== bundling via incoming"
165 hg in -R part --bundle incoming.hg --template "{node}\n" .
166 echo "== bundling"
167 hg bundle bundle.hg part --debug
149 168
@@ -1,328 +1,348
1 1 ====== Setting up test
2 2 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 3 created new head
4 4 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
5 5 checking changesets
6 6 checking manifests
7 7 crosschecking files in changesets and manifests
8 8 checking files
9 9 4 files, 9 changesets, 7 total revisions
10 10 ====== Bundle --all
11 11 9 changesets found
12 12 ====== Bundle test to full.hg
13 13 searching for changes
14 14 9 changesets found
15 15 ====== Unbundle full.hg in test
16 16 adding changesets
17 17 adding manifests
18 18 adding file changes
19 19 added 0 changesets with 0 changes to 4 files
20 20 (run 'hg update' to get a working copy)
21 21 ====== Verify empty
22 22 changeset: -1:000000000000
23 23 tag: tip
24 24 user:
25 25 date: Thu Jan 01 00:00:00 1970 +0000
26 26
27 27 checking changesets
28 28 checking manifests
29 29 crosschecking files in changesets and manifests
30 30 checking files
31 31 0 files, 0 changesets, 0 total revisions
32 32 ====== Pull full.hg into test (using --cwd)
33 33 pulling from ../full.hg
34 34 searching for changes
35 35 no changes found
36 36 ====== Pull full.hg into empty (using --cwd)
37 37 pulling from ../full.hg
38 38 requesting all changes
39 39 adding changesets
40 40 adding manifests
41 41 adding file changes
42 42 added 9 changesets with 7 changes to 4 files (+1 heads)
43 43 (run 'hg heads' to see heads, 'hg merge' to merge)
44 44 ====== Rollback empty
45 45 rolling back last transaction
46 46 ====== Pull full.hg into empty again (using --cwd)
47 47 pulling from ../full.hg
48 48 requesting all changes
49 49 adding changesets
50 50 adding manifests
51 51 adding file changes
52 52 added 9 changesets with 7 changes to 4 files (+1 heads)
53 53 (run 'hg heads' to see heads, 'hg merge' to merge)
54 54 ====== Pull full.hg into test (using -R)
55 55 pulling from full.hg
56 56 searching for changes
57 57 no changes found
58 58 ====== Pull full.hg into empty (using -R)
59 59 pulling from full.hg
60 60 searching for changes
61 61 no changes found
62 62 ====== Rollback empty
63 63 rolling back last transaction
64 64 ====== Pull full.hg into empty again (using -R)
65 65 pulling from full.hg
66 66 requesting all changes
67 67 adding changesets
68 68 adding manifests
69 69 adding file changes
70 70 added 9 changesets with 7 changes to 4 files (+1 heads)
71 71 (run 'hg heads' to see heads, 'hg merge' to merge)
72 72 ====== Log -R full.hg in fresh empty
73 73 changeset: 8:836ac62537ab
74 74 tag: tip
75 75 parent: 3:ac69c658229d
76 76 user: test
77 77 date: Mon Jan 12 13:46:40 1970 +0000
78 78 summary: 0.3m
79 79
80 80 changeset: 7:80fe151401c2
81 81 user: test
82 82 date: Mon Jan 12 13:46:40 1970 +0000
83 83 summary: 1.3m
84 84
85 85 changeset: 6:1e3f6b843bd6
86 86 user: test
87 87 date: Mon Jan 12 13:46:40 1970 +0000
88 88 summary: 1.3
89 89
90 90 changeset: 5:024e4e7df376
91 91 user: test
92 92 date: Mon Jan 12 13:46:40 1970 +0000
93 93 summary: 1.2
94 94
95 95 changeset: 4:5f4f3ceb285e
96 96 parent: 0:5649c9d34dd8
97 97 user: test
98 98 date: Mon Jan 12 13:46:40 1970 +0000
99 99 summary: 1.1
100 100
101 101 changeset: 3:ac69c658229d
102 102 user: test
103 103 date: Mon Jan 12 13:46:40 1970 +0000
104 104 summary: 0.3
105 105
106 106 changeset: 2:d62976ca1e50
107 107 user: test
108 108 date: Mon Jan 12 13:46:40 1970 +0000
109 109 summary: 0.2
110 110
111 111 changeset: 1:10b2180f755b
112 112 user: test
113 113 date: Mon Jan 12 13:46:40 1970 +0000
114 114 summary: 0.1
115 115
116 116 changeset: 0:5649c9d34dd8
117 117 user: test
118 118 date: Mon Jan 12 13:46:40 1970 +0000
119 119 summary: 0.0
120 120
121 121 ====== Pull ../full.hg into empty (with hook)
122 122 changegroup hook: HG_NODE=5649c9d34dd87d0ecb5fd39672128376e83b22e1 HG_SOURCE=pull HG_URL=bundle:../full.hg
123 123 pulling from bundle://../full.hg
124 124 requesting all changes
125 125 adding changesets
126 126 adding manifests
127 127 adding file changes
128 128 added 9 changesets with 7 changes to 4 files (+1 heads)
129 129 (run 'hg heads' to see heads, 'hg merge' to merge)
130 130 ====== Rollback empty
131 131 rolling back last transaction
132 132 ====== Log -R bundle:empty+full.hg
133 133 8 7 6 5 4 3 2 1 0
134 134 ====== Pull full.hg into empty again (using -R; with hook)
135 135 changegroup hook: HG_NODE=5649c9d34dd87d0ecb5fd39672128376e83b22e1 HG_SOURCE=pull HG_URL=bundle:empty+full.hg
136 136 pulling from full.hg
137 137 requesting all changes
138 138 adding changesets
139 139 adding manifests
140 140 adding file changes
141 141 added 9 changesets with 7 changes to 4 files (+1 heads)
142 142 (run 'hg heads' to see heads, 'hg merge' to merge)
143 143 ====== Create partial clones
144 144 requesting all changes
145 145 adding changesets
146 146 adding manifests
147 147 adding file changes
148 148 added 4 changesets with 4 changes to 1 files
149 149 updating to branch default
150 150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 151 updating to branch default
152 152 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
153 153 ====== Log -R full.hg in partial
154 154 changeset: 8:836ac62537ab
155 155 tag: tip
156 156 parent: 3:ac69c658229d
157 157 user: test
158 158 date: Mon Jan 12 13:46:40 1970 +0000
159 159 summary: 0.3m
160 160
161 161 changeset: 7:80fe151401c2
162 162 user: test
163 163 date: Mon Jan 12 13:46:40 1970 +0000
164 164 summary: 1.3m
165 165
166 166 changeset: 6:1e3f6b843bd6
167 167 user: test
168 168 date: Mon Jan 12 13:46:40 1970 +0000
169 169 summary: 1.3
170 170
171 171 changeset: 5:024e4e7df376
172 172 user: test
173 173 date: Mon Jan 12 13:46:40 1970 +0000
174 174 summary: 1.2
175 175
176 176 changeset: 4:5f4f3ceb285e
177 177 parent: 0:5649c9d34dd8
178 178 user: test
179 179 date: Mon Jan 12 13:46:40 1970 +0000
180 180 summary: 1.1
181 181
182 182 changeset: 3:ac69c658229d
183 183 user: test
184 184 date: Mon Jan 12 13:46:40 1970 +0000
185 185 summary: 0.3
186 186
187 187 changeset: 2:d62976ca1e50
188 188 user: test
189 189 date: Mon Jan 12 13:46:40 1970 +0000
190 190 summary: 0.2
191 191
192 192 changeset: 1:10b2180f755b
193 193 user: test
194 194 date: Mon Jan 12 13:46:40 1970 +0000
195 195 summary: 0.1
196 196
197 197 changeset: 0:5649c9d34dd8
198 198 user: test
199 199 date: Mon Jan 12 13:46:40 1970 +0000
200 200 summary: 0.0
201 201
202 202 ====== Incoming full.hg in partial
203 203 comparing with bundle://../full.hg
204 204 searching for changes
205 205 changeset: 4:5f4f3ceb285e
206 206 parent: 0:5649c9d34dd8
207 207 user: test
208 208 date: Mon Jan 12 13:46:40 1970 +0000
209 209 summary: 1.1
210 210
211 211 changeset: 5:024e4e7df376
212 212 user: test
213 213 date: Mon Jan 12 13:46:40 1970 +0000
214 214 summary: 1.2
215 215
216 216 changeset: 6:1e3f6b843bd6
217 217 user: test
218 218 date: Mon Jan 12 13:46:40 1970 +0000
219 219 summary: 1.3
220 220
221 221 changeset: 7:80fe151401c2
222 222 user: test
223 223 date: Mon Jan 12 13:46:40 1970 +0000
224 224 summary: 1.3m
225 225
226 226 changeset: 8:836ac62537ab
227 227 tag: tip
228 228 parent: 3:ac69c658229d
229 229 user: test
230 230 date: Mon Jan 12 13:46:40 1970 +0000
231 231 summary: 0.3m
232 232
233 233 ====== Outgoing -R full.hg vs partial2 in partial
234 234 comparing with ../partial2
235 235 searching for changes
236 236 changeset: 4:5f4f3ceb285e
237 237 parent: 0:5649c9d34dd8
238 238 user: test
239 239 date: Mon Jan 12 13:46:40 1970 +0000
240 240 summary: 1.1
241 241
242 242 changeset: 5:024e4e7df376
243 243 user: test
244 244 date: Mon Jan 12 13:46:40 1970 +0000
245 245 summary: 1.2
246 246
247 247 changeset: 6:1e3f6b843bd6
248 248 user: test
249 249 date: Mon Jan 12 13:46:40 1970 +0000
250 250 summary: 1.3
251 251
252 252 changeset: 7:80fe151401c2
253 253 user: test
254 254 date: Mon Jan 12 13:46:40 1970 +0000
255 255 summary: 1.3m
256 256
257 257 changeset: 8:836ac62537ab
258 258 tag: tip
259 259 parent: 3:ac69c658229d
260 260 user: test
261 261 date: Mon Jan 12 13:46:40 1970 +0000
262 262 summary: 0.3m
263 263
264 264 ====== Outgoing -R does-not-exist.hg vs partial2 in partial
265 265 abort: No such file or directory: ../does-not-exist.hg
266 266 ====== Direct clone from bundle (all-history)
267 267 requesting all changes
268 268 adding changesets
269 269 adding manifests
270 270 adding file changes
271 271 added 9 changesets with 7 changes to 4 files (+1 heads)
272 272 updating to branch default
273 273 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
274 274 changeset: 8:836ac62537ab
275 275 tag: tip
276 276 parent: 3:ac69c658229d
277 277 user: test
278 278 date: Mon Jan 12 13:46:40 1970 +0000
279 279 summary: 0.3m
280 280
281 281 changeset: 7:80fe151401c2
282 282 user: test
283 283 date: Mon Jan 12 13:46:40 1970 +0000
284 284 summary: 1.3m
285 285
286 286 ====== Unbundle incremental bundles into fresh empty in one go
287 287 1 changesets found
288 288 1 changesets found
289 289 adding changesets
290 290 adding manifests
291 291 adding file changes
292 292 added 1 changesets with 1 changes to 1 files
293 293 adding changesets
294 294 adding manifests
295 295 adding file changes
296 296 added 1 changesets with 1 changes to 1 files
297 297 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
298 298 ====== test for 540d1059c802
299 299 updating to branch default
300 300 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
301 301 searching for changes
302 302 1 changesets found
303 303 comparing with ../bundle.hg
304 304 searching for changes
305 305 changeset: 2:ed1b79f46b9a
306 306 tag: tip
307 307 parent: 0:bbd179dfa0a7
308 308 user: test
309 309 date: Thu Jan 01 00:00:00 1970 +0000
310 310 summary: change foo
311 311
312 312 ===== test that verify bundle does not traceback
313 313 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
314 314 abort: cannot verify bundle or remote repos
315 315 checking changesets
316 316 checking manifests
317 317 crosschecking files in changesets and manifests
318 318 checking files
319 319 2 files, 2 changesets, 2 total revisions
320 320 ====== diff against bundle
321 321 diff -r 836ac62537ab anotherfile
322 322 --- a/anotherfile Mon Jan 12 13:46:40 1970 +0000
323 323 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
324 324 @@ -1,4 +0,0 @@
325 325 -0
326 326 -1
327 327 -2
328 328 -3
329 ====== bundle single branch
330 adding a
331 adding b
332 adding b1
333 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
334 adding c
335 created new head
336 adding c1
337 == bundling via incoming
338 comparing with .
339 searching for changes
340 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
341 5ece8e77363e2b5269e27c66828b72da29e4341a
342 == bundling
343 searching for changes
344 common changesets up to c0025332f9ed
345 2 changesets found
346 list of changesets:
347 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
348 5ece8e77363e2b5269e27c66828b72da29e4341a
@@ -1,219 +1,219
1 1 notify extension - hooks for sending email notifications at commit/push time
2 2
3 3 Subscriptions can be managed through a hgrc file. Default mode is to print
4 4 messages to stdout, for testing and configuring.
5 5
6 6 To use, configure the notify extension and enable it in hgrc like this:
7 7
8 8 [extensions]
9 9 hgext.notify =
10 10
11 11 [hooks]
12 12 # one email for each incoming changeset
13 13 incoming.notify = python:hgext.notify.hook
14 14 # batch emails when many changesets incoming at one time
15 15 changegroup.notify = python:hgext.notify.hook
16 16
17 17 [notify]
18 18 # config items go here
19 19
20 20 Required configuration items:
21 21
22 22 config = /path/to/file # file containing subscriptions
23 23
24 24 Optional configuration items:
25 25
26 26 test = True # print messages to stdout for testing
27 27 strip = 3 # number of slashes to strip for url paths
28 28 domain = example.com # domain to use if committer missing domain
29 29 style = ... # style file to use when formatting email
30 30 template = ... # template to use when formatting email
31 31 incoming = ... # template to use when run as incoming hook
32 32 changegroup = ... # template when run as changegroup hook
33 33 maxdiff = 300 # max lines of diffs to include (0=none, -1=all)
34 34 maxsubject = 67 # truncate subject line longer than this
35 35 diffstat = True # add a diffstat before the diff content
36 36 sources = serve # notify if source of incoming changes in this list
37 37 # (serve == ssh or http, push, pull, bundle)
38 38 merge = False # send notification for merges (default True)
39 39 [email]
40 40 from = user@host.com # email address to send as if none given
41 41 [web]
42 42 baseurl = http://hgserver/... # root of hg web site for browsing commits
43 43
44 44 The notify config file has same format as a regular hgrc file. It has two
45 45 sections so you can express subscriptions in whatever way is handier for you.
46 46
47 47 [usersubs]
48 48 # key is subscriber email, value is ","-separated list of glob patterns
49 49 user@host = pattern
50 50
51 51 [reposubs]
52 52 # key is glob pattern, value is ","-separated list of subscriber emails
53 53 pattern = user@host
54 54
55 55 Glob patterns are matched against path to repository root.
56 56
57 57 If you like, you can put notify config file in repository that users can push
58 58 changes to, they can manage their own subscriptions.
59 59
60 60 no commands defined
61 61 % commit
62 62 adding a
63 63 % clone
64 64 updating to branch default
65 65 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
66 66 % commit
67 67 % pull (minimal config)
68 68 pulling from ../a
69 69 searching for changes
70 70 adding changesets
71 71 adding manifests
72 72 adding file changes
73 73 added 1 changesets with 1 changes to 1 files
74 74 Content-Type: text/plain; charset="us-ascii"
75 75 MIME-Version: 1.0
76 76 Content-Transfer-Encoding: 7bit
77 77 Date:
78 78 Subject: changeset in test-notify/b: b
79 79 From: test
80 80 X-Hg-Notification: changeset 0647d048b600
81 81 Message-Id:
82 82 To: baz, foo@bar
83 83
84 84 changeset 0647d048b600 in test-notify/b
85 85 details: test-notify/b?cmd=changeset;node=0647d048b600
86 86 description: b
87 87
88 88 diffs (6 lines):
89 89
90 90 diff -r cb9a9f314b8b -r 0647d048b600 a
91 91 --- a/a Thu Jan 01 00:00:00 1970 +0000
92 92 +++ b/a Thu Jan 01 00:00:01 1970 +0000
93 93 @@ -1,1 +1,2 @@
94 94 a
95 95 +a
96 96 (run 'hg update' to get a working copy)
97 97 % fail for config file is missing
98 98 rolling back last transaction
99 99 pull failed
100 100 % pull
101 101 rolling back last transaction
102 102 pulling from ../a
103 103 searching for changes
104 104 adding changesets
105 105 adding manifests
106 106 adding file changes
107 107 added 1 changesets with 1 changes to 1 files
108 108 Content-Type: text/plain; charset="us-ascii"
109 109 MIME-Version: 1.0
110 110 Content-Transfer-Encoding: 7bit
111 111 X-Test: foo
112 112 Date:
113 113 Subject: b
114 114 From: test@test.com
115 115 X-Hg-Notification: changeset 0647d048b600
116 116 Message-Id:
117 117 To: baz@test.com, foo@bar
118 118
119 119 changeset 0647d048b600
120 120 description:
121 121 b
122 122 diffs (6 lines):
123 123
124 124 diff -r cb9a9f314b8b -r 0647d048b600 a
125 125 --- a/a Thu Jan 01 00:00:00 1970 +0000
126 126 +++ b/a Thu Jan 01 00:00:01 1970 +0000
127 127 @@ -1,1 +1,2 @@
128 128 a
129 129 +a
130 130 (run 'hg update' to get a working copy)
131 131 % pull
132 132 rolling back last transaction
133 133 pulling from ../a
134 134 searching for changes
135 135 adding changesets
136 136 adding manifests
137 137 adding file changes
138 138 added 1 changesets with 1 changes to 1 files
139 139 Content-Type: text/plain; charset="us-ascii"
140 140 MIME-Version: 1.0
141 141 Content-Transfer-Encoding: 7bit
142 142 X-Test: foo
143 143 Date:
144 144 Subject: b
145 145 From: test@test.com
146 146 X-Hg-Notification: changeset 0647d048b600
147 147 Message-Id:
148 148 To: baz@test.com, foo@bar
149 149
150 150 changeset 0647d048b600
151 151 description:
152 152 b
153 153 diffstat:
154 154
155 155 a | 1 +
156 156 1 files changed, 1 insertions(+), 0 deletions(-)
157 157
158 158 diffs (6 lines):
159 159
160 160 diff -r cb9a9f314b8b -r 0647d048b600 a
161 161 --- a/a Thu Jan 01 00:00:00 1970 +0000
162 162 +++ b/a Thu Jan 01 00:00:01 1970 +0000
163 163 @@ -1,1 +1,2 @@
164 164 a
165 165 +a
166 166 (run 'hg update' to get a working copy)
167 167 % test merge
168 168 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
169 169 created new head
170 170 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
171 171 (branch merge, don't forget to commit)
172 172 pulling from ../a
173 173 searching for changes
174 174 adding changesets
175 175 adding manifests
176 176 adding file changes
177 added 2 changesets with 0 changes to 1 files
177 added 2 changesets with 0 changes to 0 files
178 178 Content-Type: text/plain; charset="us-ascii"
179 179 MIME-Version: 1.0
180 180 Content-Transfer-Encoding: 7bit
181 181 X-Test: foo
182 182 Date:
183 183 Subject: adda2
184 184 From: test@test.com
185 185 X-Hg-Notification: changeset 0a184ce6067f
186 186 Message-Id:
187 187 To: baz@test.com, foo@bar
188 188
189 189 changeset 0a184ce6067f
190 190 description:
191 191 adda2
192 192 diffstat:
193 193
194 194 a | 1 +
195 195 1 files changed, 1 insertions(+), 0 deletions(-)
196 196
197 197 diffs (6 lines):
198 198
199 199 diff -r cb9a9f314b8b -r 0a184ce6067f a
200 200 --- a/a Thu Jan 01 00:00:00 1970 +0000
201 201 +++ b/a Thu Jan 01 00:00:02 1970 +0000
202 202 @@ -1,1 +1,2 @@
203 203 a
204 204 +a
205 205 Content-Type: text/plain; charset="us-ascii"
206 206 MIME-Version: 1.0
207 207 Content-Transfer-Encoding: 7bit
208 208 X-Test: foo
209 209 Date:
210 210 Subject: merge
211 211 From: test@test.com
212 212 X-Hg-Notification: changeset 22c88b85aa27
213 213 Message-Id:
214 214 To: baz@test.com, foo@bar
215 215
216 216 changeset 22c88b85aa27
217 217 description:
218 218 merge
219 219 (run 'hg update' to get a working copy)
General Comments 0
You need to be logged in to leave comments. Login now