Show added files as "added" in editor commit message (issue 1330)
Patrick Mezard
r7072:4e0d54fb default
@@ -1,2077 +1,2085 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store')
20 20
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.root = os.path.realpath(path)
24 24 self.path = os.path.join(self.root, ".hg")
25 25 self.origroot = path
26 26 self.opener = util.opener(self.path)
27 27 self.wopener = util.opener(self.root)
28 28
29 29 if not os.path.isdir(self.path):
30 30 if create:
31 31 if not os.path.exists(path):
32 32 os.mkdir(path)
33 33 os.mkdir(self.path)
34 34 requirements = ["revlogv1"]
35 35 if parentui.configbool('format', 'usestore', True):
36 36 os.mkdir(os.path.join(self.path, "store"))
37 37 requirements.append("store")
38 38 # create an invalid changelog
39 39 self.opener("00changelog.i", "a").write(
40 40 '\0\0\0\2' # represents revlogv2
41 41 ' dummy changelog to prevent using the old repo layout'
42 42 )
43 43 reqfile = self.opener("requires", "w")
44 44 for r in requirements:
45 45 reqfile.write("%s\n" % r)
46 46 reqfile.close()
47 47 else:
48 48 raise repo.RepoError(_("repository %s not found") % path)
49 49 elif create:
50 50 raise repo.RepoError(_("repository %s already exists") % path)
51 51 else:
52 52 # find requirements
53 53 requirements = []
54 54 try:
55 55 requirements = self.opener("requires").read().splitlines()
56 56 for r in requirements:
57 57 if r not in self.supported:
58 58 raise repo.RepoError(_("requirement '%s' not supported") % r)
59 59 except IOError, inst:
60 60 if inst.errno != errno.ENOENT:
61 61 raise
62 62
63 63 self.store = store.store(requirements, self.path, util.opener)
64 64 self.spath = self.store.path
65 65 self.sopener = self.store.opener
66 66 self.sjoin = self.store.join
67 67 self.opener.createmode = self.store.createmode
68 68
69 69 self.ui = ui.ui(parentui=parentui)
70 70 try:
71 71 self.ui.readconfig(self.join("hgrc"), self.root)
72 72 extensions.loadall(self.ui)
73 73 except IOError:
74 74 pass
75 75
76 76 self.tagscache = None
77 77 self._tagstypecache = None
78 78 self.branchcache = None
79 79 self._ubranchcache = None # UTF-8 version of branchcache
80 80 self._branchcachetip = None
81 81 self.nodetagscache = None
82 82 self.filterpats = {}
83 83 self._datafilters = {}
84 84 self._transref = self._lockref = self._wlockref = None
85 85
86 86 def __getattr__(self, name):
87 87 if name == 'changelog':
88 88 self.changelog = changelog.changelog(self.sopener)
89 89 self.sopener.defversion = self.changelog.version
90 90 return self.changelog
91 91 if name == 'manifest':
92 92 self.changelog
93 93 self.manifest = manifest.manifest(self.sopener)
94 94 return self.manifest
95 95 if name == 'dirstate':
96 96 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
97 97 return self.dirstate
98 98 else:
99 99 raise AttributeError(name)
100 100
101 101 def __getitem__(self, changeid):
102 102 if changeid == None:
103 103 return context.workingctx(self)
104 104 return context.changectx(self, changeid)
105 105
106 106 def __nonzero__(self):
107 107 return True
108 108
109 109 def __len__(self):
110 110 return len(self.changelog)
111 111
112 112 def __iter__(self):
113 113 for i in xrange(len(self)):
114 114 yield i
115 115
116 116 def url(self):
117 117 return 'file:' + self.root
118 118
119 119 def hook(self, name, throw=False, **args):
120 120 return hook.hook(self.ui, self, name, throw, **args)
121 121
122 122 tag_disallowed = ':\r\n'
123 123
124 124 def _tag(self, names, node, message, local, user, date, parent=None,
125 125 extra={}):
126 126 use_dirstate = parent is None
127 127
128 128 if isinstance(names, str):
129 129 allchars = names
130 130 names = (names,)
131 131 else:
132 132 allchars = ''.join(names)
133 133 for c in self.tag_disallowed:
134 134 if c in allchars:
135 135 raise util.Abort(_('%r cannot be used in a tag name') % c)
136 136
137 137 for name in names:
138 138 self.hook('pretag', throw=True, node=hex(node), tag=name,
139 139 local=local)
140 140
141 141 def writetags(fp, names, munge, prevtags):
142 142 fp.seek(0, 2)
143 143 if prevtags and prevtags[-1] != '\n':
144 144 fp.write('\n')
145 145 for name in names:
146 146 m = munge and munge(name) or name
147 147 if self._tagstypecache and name in self._tagstypecache:
148 148 old = self.tagscache.get(name, nullid)
149 149 fp.write('%s %s\n' % (hex(old), m))
150 150 fp.write('%s %s\n' % (hex(node), m))
151 151 fp.close()
152 152
153 153 prevtags = ''
154 154 if local:
155 155 try:
156 156 fp = self.opener('localtags', 'r+')
157 157 except IOError, err:
158 158 fp = self.opener('localtags', 'a')
159 159 else:
160 160 prevtags = fp.read()
161 161
162 162 # local tags are stored in the current charset
163 163 writetags(fp, names, None, prevtags)
164 164 for name in names:
165 165 self.hook('tag', node=hex(node), tag=name, local=local)
166 166 return
167 167
168 168 if use_dirstate:
169 169 try:
170 170 fp = self.wfile('.hgtags', 'rb+')
171 171 except IOError, err:
172 172 fp = self.wfile('.hgtags', 'ab')
173 173 else:
174 174 prevtags = fp.read()
175 175 else:
176 176 try:
177 177 prevtags = self.filectx('.hgtags', parent).data()
178 178 except revlog.LookupError:
179 179 pass
180 180 fp = self.wfile('.hgtags', 'wb')
181 181 if prevtags:
182 182 fp.write(prevtags)
183 183
184 184 # committed tags are stored in UTF-8
185 185 writetags(fp, names, util.fromlocal, prevtags)
186 186
187 187 if use_dirstate and '.hgtags' not in self.dirstate:
188 188 self.add(['.hgtags'])
189 189
190 190 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
191 191 extra=extra)
192 192
193 193 for name in names:
194 194 self.hook('tag', node=hex(node), tag=name, local=local)
195 195
196 196 return tagnode
197 197
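    # File format sketch for writetags() above, with hypothetical names: one
    # "<hex node> <tag>" pair per line; a tag that already existed gets its
    # previous node written on the line before the new one, and a removed
    # tag is recorded against the null node:
    #
    #   <old-node-hex> release-1.0
    #   <new-node-hex> release-1.0
    #   0000000000000000000000000000000000000000 withdrawn-tag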
198 198 def tag(self, names, node, message, local, user, date):
199 199 '''tag a revision with one or more symbolic names.
200 200
201 201 names is a list of strings or, when adding a single tag, names may be a
202 202 string.
203 203
204 204 if local is True, the tags are stored in a per-repository file.
205 205 otherwise, they are stored in the .hgtags file, and a new
206 206 changeset is committed with the change.
207 207
208 208 keyword arguments:
209 209
210 210 local: whether to store tags in non-version-controlled file
211 211 (default False)
212 212
213 213 message: commit message to use if committing
214 214
215 215 user: name of user to use if committing
216 216
217 217 date: date tuple to use if committing'''
218 218
219 219 for x in self.status()[:5]:
220 220 if '.hgtags' in x:
221 221 raise util.Abort(_('working copy of .hgtags is changed '
222 222 '(please commit .hgtags manually)'))
223 223
224 224 self._tag(names, node, message, local, user, date)
225 225
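    # Minimal usage sketch of the API above (tag name and user are
    # hypothetical; date=None lets the commit pick the current time):
    #
    #   node = repo.lookup('tip')
    #   repo.tag(['release-1.0'], node, 'Added tag release-1.0', False,
    #            'user@example.com', None)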
226 226 def tags(self):
227 227 '''return a mapping of tag to node'''
228 228 if self.tagscache:
229 229 return self.tagscache
230 230
231 231 globaltags = {}
232 232 tagtypes = {}
233 233
234 234 def readtags(lines, fn, tagtype):
235 235 filetags = {}
236 236 count = 0
237 237
238 238 def warn(msg):
239 239 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
240 240
241 241 for l in lines:
242 242 count += 1
243 243 if not l:
244 244 continue
245 245 s = l.split(" ", 1)
246 246 if len(s) != 2:
247 247 warn(_("cannot parse entry"))
248 248 continue
249 249 node, key = s
250 250 key = util.tolocal(key.strip()) # stored in UTF-8
251 251 try:
252 252 bin_n = bin(node)
253 253 except TypeError:
254 254 warn(_("node '%s' is not well formed") % node)
255 255 continue
256 256 if bin_n not in self.changelog.nodemap:
257 257 warn(_("tag '%s' refers to unknown node") % key)
258 258 continue
259 259
260 260 h = []
261 261 if key in filetags:
262 262 n, h = filetags[key]
263 263 h.append(n)
264 264 filetags[key] = (bin_n, h)
265 265
266 266 for k, nh in filetags.items():
267 267 if k not in globaltags:
268 268 globaltags[k] = nh
269 269 tagtypes[k] = tagtype
270 270 continue
271 271
272 272 # we prefer the global tag if:
273 273             # it supersedes us OR
274 274             # mutual supersedes and it has a higher rank
275 275 # otherwise we win because we're tip-most
276 276 an, ah = nh
277 277 bn, bh = globaltags[k]
278 278 if (bn != an and an in bh and
279 279 (bn not in ah or len(bh) > len(ah))):
280 280 an = bn
281 281 ah.extend([n for n in bh if n not in ah])
282 282 globaltags[k] = an, ah
283 283 tagtypes[k] = tagtype
284 284
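        # Worked example of the rule above, with hypothetical nodes: if the
        # global entry for "v1" is (N2, [N0, N1]) and a tip-most file offers
        # (N1, [N0]), then N1 appears in the global history and N2 does not
        # appear in the candidate's, so the global tag wins and N2 is kept.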
285 285 # read the tags file from each head, ending with the tip
286 286 f = None
287 287 for rev, node, fnode in self._hgtagsnodes():
288 288 f = (f and f.filectx(fnode) or
289 289 self.filectx('.hgtags', fileid=fnode))
290 290 readtags(f.data().splitlines(), f, "global")
291 291
292 292 try:
293 293 data = util.fromlocal(self.opener("localtags").read())
294 294 # localtags are stored in the local character set
295 295 # while the internal tag table is stored in UTF-8
296 296 readtags(data.splitlines(), "localtags", "local")
297 297 except IOError:
298 298 pass
299 299
300 300 self.tagscache = {}
301 301 self._tagstypecache = {}
302 302 for k,nh in globaltags.items():
303 303 n = nh[0]
304 304 if n != nullid:
305 305 self.tagscache[k] = n
306 306 self._tagstypecache[k] = tagtypes[k]
307 307 self.tagscache['tip'] = self.changelog.tip()
308 308 return self.tagscache
309 309
310 310 def tagtype(self, tagname):
311 311 '''
312 312 return the type of the given tag. result can be:
313 313
314 314 'local' : a local tag
315 315 'global' : a global tag
316 316 None : tag does not exist
317 317 '''
318 318
319 319 self.tags()
320 320
321 321 return self._tagstypecache.get(tagname)
322 322
323 323 def _hgtagsnodes(self):
324 324 heads = self.heads()
325 325 heads.reverse()
326 326 last = {}
327 327 ret = []
328 328 for node in heads:
329 329 c = self[node]
330 330 rev = c.rev()
331 331 try:
332 332 fnode = c.filenode('.hgtags')
333 333 except revlog.LookupError:
334 334 continue
335 335 ret.append((rev, node, fnode))
336 336 if fnode in last:
337 337 ret[last[fnode]] = None
338 338 last[fnode] = len(ret) - 1
339 339 return [item for item in ret if item]
340 340
341 341 def tagslist(self):
342 342 '''return a list of tags ordered by revision'''
343 343 l = []
344 344 for t, n in self.tags().items():
345 345 try:
346 346 r = self.changelog.rev(n)
347 347 except:
348 348 r = -2 # sort to the beginning of the list if unknown
349 349 l.append((r, t, n))
350 350 return [(t, n) for r, t, n in util.sort(l)]
351 351
352 352 def nodetags(self, node):
353 353 '''return the tags associated with a node'''
354 354 if not self.nodetagscache:
355 355 self.nodetagscache = {}
356 356 for t, n in self.tags().items():
357 357 self.nodetagscache.setdefault(n, []).append(t)
358 358 return self.nodetagscache.get(node, [])
359 359
360 360 def _branchtags(self, partial, lrev):
361 361 tiprev = len(self) - 1
362 362 if lrev != tiprev:
363 363 self._updatebranchcache(partial, lrev+1, tiprev+1)
364 364 self._writebranchcache(partial, self.changelog.tip(), tiprev)
365 365
366 366 return partial
367 367
368 368 def branchtags(self):
369 369 tip = self.changelog.tip()
370 370 if self.branchcache is not None and self._branchcachetip == tip:
371 371 return self.branchcache
372 372
373 373 oldtip = self._branchcachetip
374 374 self._branchcachetip = tip
375 375 if self.branchcache is None:
376 376 self.branchcache = {} # avoid recursion in changectx
377 377 else:
378 378 self.branchcache.clear() # keep using the same dict
379 379 if oldtip is None or oldtip not in self.changelog.nodemap:
380 380 partial, last, lrev = self._readbranchcache()
381 381 else:
382 382 lrev = self.changelog.rev(oldtip)
383 383 partial = self._ubranchcache
384 384
385 385 self._branchtags(partial, lrev)
386 386
387 387 # the branch cache is stored on disk as UTF-8, but in the local
388 388 # charset internally
389 389 for k, v in partial.items():
390 390 self.branchcache[util.tolocal(k)] = v
391 391 self._ubranchcache = partial
392 392 return self.branchcache
393 393
394 394 def _readbranchcache(self):
395 395 partial = {}
396 396 try:
397 397 f = self.opener("branch.cache")
398 398 lines = f.read().split('\n')
399 399 f.close()
400 400 except (IOError, OSError):
401 401 return {}, nullid, nullrev
402 402
403 403 try:
404 404 last, lrev = lines.pop(0).split(" ", 1)
405 405 last, lrev = bin(last), int(lrev)
406 406 if lrev >= len(self) or self[lrev].node() != last:
407 407 # invalidate the cache
408 408 raise ValueError('invalidating branch cache (tip differs)')
409 409 for l in lines:
410 410 if not l: continue
411 411 node, label = l.split(" ", 1)
412 412 partial[label.strip()] = bin(node)
413 413 except (KeyboardInterrupt, util.SignalInterrupt):
414 414 raise
415 415 except Exception, inst:
416 416 if self.ui.debugflag:
417 417 self.ui.warn(str(inst), '\n')
418 418 partial, last, lrev = {}, nullid, nullrev
419 419 return partial, last, lrev
420 420
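    # Layout of branch.cache as read above and written below, sketched with
    # placeholder hashes: a "<tip hex> <tip rev>" header line, then one
    # "<hex node> <branch>" line per branch head:
    #
    #   <tip-node-hex> <tip-rev>
    #   <node-hex> default
    #   <node-hex> stable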
421 421 def _writebranchcache(self, branches, tip, tiprev):
422 422 try:
423 423 f = self.opener("branch.cache", "w", atomictemp=True)
424 424 f.write("%s %s\n" % (hex(tip), tiprev))
425 425 for label, node in branches.iteritems():
426 426 f.write("%s %s\n" % (hex(node), label))
427 427 f.rename()
428 428 except (IOError, OSError):
429 429 pass
430 430
431 431 def _updatebranchcache(self, partial, start, end):
432 432 for r in xrange(start, end):
433 433 c = self[r]
434 434 b = c.branch()
435 435 partial[b] = c.node()
436 436
437 437 def lookup(self, key):
438 438 if key == '.':
439 439 return self.dirstate.parents()[0]
440 440 elif key == 'null':
441 441 return nullid
442 442 n = self.changelog._match(key)
443 443 if n:
444 444 return n
445 445 if key in self.tags():
446 446 return self.tags()[key]
447 447 if key in self.branchtags():
448 448 return self.branchtags()[key]
449 449 n = self.changelog._partialmatch(key)
450 450 if n:
451 451 return n
452 452 try:
453 453 if len(key) == 20:
454 454 key = hex(key)
455 455 except:
456 456 pass
457 457 raise repo.RepoError(_("unknown revision '%s'") % key)
458 458
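    # Resolution order sketch for lookup() (keys are hypothetical):
    #
    #   repo.lookup('.')        # first dirstate parent
    #   repo.lookup('null')     # nullid
    #   repo.lookup('42')       # exact rev number or full hash
    #   repo.lookup('v1.0')     # then tag name
    #   repo.lookup('default')  # then branch tip
    #   repo.lookup('3a6a38')   # finally an unambiguous hash prefix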
459 459 def local(self):
460 460 return True
461 461
462 462 def join(self, f):
463 463 return os.path.join(self.path, f)
464 464
465 465 def wjoin(self, f):
466 466 return os.path.join(self.root, f)
467 467
468 468 def rjoin(self, f):
469 469 return os.path.join(self.root, util.pconvert(f))
470 470
471 471 def file(self, f):
472 472 if f[0] == '/':
473 473 f = f[1:]
474 474 return filelog.filelog(self.sopener, f)
475 475
476 476 def changectx(self, changeid):
477 477 return self[changeid]
478 478
479 479 def parents(self, changeid=None):
480 480 '''get list of changectxs for parents of changeid'''
481 481 return self[changeid].parents()
482 482
483 483 def filectx(self, path, changeid=None, fileid=None):
484 484 """changeid can be a changeset revision, node, or tag.
485 485 fileid can be a file revision or node."""
486 486 return context.filectx(self, path, changeid, fileid)
487 487
488 488 def getcwd(self):
489 489 return self.dirstate.getcwd()
490 490
491 491 def pathto(self, f, cwd=None):
492 492 return self.dirstate.pathto(f, cwd)
493 493
494 494 def wfile(self, f, mode='r'):
495 495 return self.wopener(f, mode)
496 496
497 497 def _link(self, f):
498 498 return os.path.islink(self.wjoin(f))
499 499
500 500 def _filter(self, filter, filename, data):
501 501 if filter not in self.filterpats:
502 502 l = []
503 503 for pat, cmd in self.ui.configitems(filter):
504 504 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 505 fn = None
506 506 params = cmd
507 507 for name, filterfn in self._datafilters.iteritems():
508 508 if cmd.startswith(name):
509 509 fn = filterfn
510 510 params = cmd[len(name):].lstrip()
511 511 break
512 512 if not fn:
513 513 fn = lambda s, c, **kwargs: util.filter(s, c)
514 514 # Wrap old filters not supporting keyword arguments
515 515 if not inspect.getargspec(fn)[2]:
516 516 oldfn = fn
517 517 fn = lambda s, c, **kwargs: oldfn(s, c)
518 518 l.append((mf, fn, params))
519 519 self.filterpats[filter] = l
520 520
521 521 for mf, fn, cmd in self.filterpats[filter]:
522 522 if mf(filename):
523 523 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 524 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
525 525 break
526 526
527 527 return data
528 528
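    # The (pattern, command) pairs iterated above come from an hgrc section
    # named after the filter; a hedged example (filter names hypothetical,
    # the "name:" form dispatches to a filter registered via adddatafilter,
    # anything else is piped through a shell command):
    #
    #   [encode]
    #   *.txt = tr '\n' '\r'
    #   **.dat = mydecoder: --strict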
529 529 def adddatafilter(self, name, filter):
530 530 self._datafilters[name] = filter
531 531
532 532 def wread(self, filename):
533 533 if self._link(filename):
534 534 data = os.readlink(self.wjoin(filename))
535 535 else:
536 536 data = self.wopener(filename, 'r').read()
537 537 return self._filter("encode", filename, data)
538 538
539 539 def wwrite(self, filename, data, flags):
540 540 data = self._filter("decode", filename, data)
541 541 try:
542 542 os.unlink(self.wjoin(filename))
543 543 except OSError:
544 544 pass
545 545 if 'l' in flags:
546 546 self.wopener.symlink(data, filename)
547 547 else:
548 548 self.wopener(filename, 'w').write(data)
549 549 if 'x' in flags:
550 550 util.set_flags(self.wjoin(filename), False, True)
551 551
552 552 def wwritedata(self, filename, data):
553 553 return self._filter("decode", filename, data)
554 554
555 555 def transaction(self):
556 556 if self._transref and self._transref():
557 557 return self._transref().nest()
558 558
559 559 # abort here if the journal already exists
560 560 if os.path.exists(self.sjoin("journal")):
561 561 raise repo.RepoError(_("journal already exists - run hg recover"))
562 562
563 563 # save dirstate for rollback
564 564 try:
565 565 ds = self.opener("dirstate").read()
566 566 except IOError:
567 567 ds = ""
568 568 self.opener("journal.dirstate", "w").write(ds)
569 569 self.opener("journal.branch", "w").write(self.dirstate.branch())
570 570
571 571 renames = [(self.sjoin("journal"), self.sjoin("undo")),
572 572 (self.join("journal.dirstate"), self.join("undo.dirstate")),
573 573 (self.join("journal.branch"), self.join("undo.branch"))]
574 574 tr = transaction.transaction(self.ui.warn, self.sopener,
575 575 self.sjoin("journal"),
576 576 aftertrans(renames),
577 577 self.store.createmode)
578 578 self._transref = weakref.ref(tr)
579 579 return tr
580 580
581 581 def recover(self):
582 582 l = self.lock()
583 583 try:
584 584 if os.path.exists(self.sjoin("journal")):
585 585 self.ui.status(_("rolling back interrupted transaction\n"))
586 586 transaction.rollback(self.sopener, self.sjoin("journal"))
587 587 self.invalidate()
588 588 return True
589 589 else:
590 590 self.ui.warn(_("no interrupted transaction available\n"))
591 591 return False
592 592 finally:
593 593 del l
594 594
595 595 def rollback(self):
596 596 wlock = lock = None
597 597 try:
598 598 wlock = self.wlock()
599 599 lock = self.lock()
600 600 if os.path.exists(self.sjoin("undo")):
601 601 self.ui.status(_("rolling back last transaction\n"))
602 602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 604 try:
605 605 branch = self.opener("undo.branch").read()
606 606 self.dirstate.setbranch(branch)
607 607 except IOError:
608 608 self.ui.warn(_("Named branch could not be reset, "
609 609 "current branch still is: %s\n")
610 610 % util.tolocal(self.dirstate.branch()))
611 611 self.invalidate()
612 612 self.dirstate.invalidate()
613 613 else:
614 614 self.ui.warn(_("no rollback information available\n"))
615 615 finally:
616 616 del lock, wlock
617 617
618 618 def invalidate(self):
619 619 for a in "changelog manifest".split():
620 620 if a in self.__dict__:
621 621 delattr(self, a)
622 622 self.tagscache = None
623 623 self._tagstypecache = None
624 624 self.nodetagscache = None
625 625 self.branchcache = None
626 626 self._ubranchcache = None
627 627 self._branchcachetip = None
628 628
629 629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
630 630 try:
631 631 l = lock.lock(lockname, 0, releasefn, desc=desc)
632 632 except lock.LockHeld, inst:
633 633 if not wait:
634 634 raise
635 635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
636 636 (desc, inst.locker))
637 637 # default to 600 seconds timeout
638 638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
639 639 releasefn, desc=desc)
640 640 if acquirefn:
641 641 acquirefn()
642 642 return l
643 643
644 644 def lock(self, wait=True):
645 645 if self._lockref and self._lockref():
646 646 return self._lockref()
647 647
648 648 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
649 649 _('repository %s') % self.origroot)
650 650 self._lockref = weakref.ref(l)
651 651 return l
652 652
653 653 def wlock(self, wait=True):
654 654 if self._wlockref and self._wlockref():
655 655 return self._wlockref()
656 656
657 657 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
658 658 self.dirstate.invalidate, _('working directory of %s') %
659 659 self.origroot)
660 660 self._wlockref = weakref.ref(l)
661 661 return l
662 662
663 663 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
664 664 """
665 665 commit an individual file as part of a larger transaction
666 666 """
667 667
668 668 fn = fctx.path()
669 669 t = fctx.data()
670 670 fl = self.file(fn)
671 671 fp1 = manifest1.get(fn, nullid)
672 672 fp2 = manifest2.get(fn, nullid)
673 673
674 674 meta = {}
675 675 cp = fctx.renamed()
676 676 if cp and cp[0] != fn:
677 677 # Mark the new revision of this file as a copy of another
678 678 # file. This copy data will effectively act as a parent
679 679 # of this new revision. If this is a merge, the first
680 680 # parent will be the nullid (meaning "look up the copy data")
681 681 # and the second one will be the other parent. For example:
682 682 #
683 683 # 0 --- 1 --- 3 rev1 changes file foo
684 684 # \ / rev2 renames foo to bar and changes it
685 685 # \- 2 -/ rev3 should have bar with all changes and
686 686 # should record that bar descends from
687 687 # bar in rev2 and foo in rev1
688 688 #
689 689 # this allows this merge to succeed:
690 690 #
691 691 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
692 692 # \ / merging rev3 and rev4 should use bar@rev2
693 693 # \- 2 --- 4 as the merge base
694 694 #
695 695
696 696 cf = cp[0]
697 697 cr = manifest1.get(cf)
698 698 nfp = fp2
699 699
700 700 if manifest2: # branch merge
701 701 if fp2 == nullid: # copied on remote side
702 702 if fp1 != nullid or cf in manifest2:
703 703 cr = manifest2[cf]
704 704 nfp = fp1
705 705
706 706 # find source in nearest ancestor if we've lost track
707 707 if not cr:
708 708 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
709 709 (fn, cf))
710 710 for a in self['.'].ancestors():
711 711 if cf in a:
712 712 cr = a[cf].filenode()
713 713 break
714 714
715 715 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
716 716 meta["copy"] = cf
717 717 meta["copyrev"] = hex(cr)
718 718 fp1, fp2 = nullid, nfp
719 719 elif fp2 != nullid:
720 720 # is one parent an ancestor of the other?
721 721 fpa = fl.ancestor(fp1, fp2)
722 722 if fpa == fp1:
723 723 fp1, fp2 = fp2, nullid
724 724 elif fpa == fp2:
725 725 fp2 = nullid
726 726
727 727 # is the file unmodified from the parent? report existing entry
728 728 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
729 729 return fp1
730 730
731 731 changelist.append(fn)
732 732 return fl.add(t, meta, tr, linkrev, fp1, fp2)
733 733
734 734 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
735 735 if p1 is None:
736 736 p1, p2 = self.dirstate.parents()
737 737 return self.commit(files=files, text=text, user=user, date=date,
738 738 p1=p1, p2=p2, extra=extra, empty_ok=True)
739 739
740 740 def commit(self, files=None, text="", user=None, date=None,
741 741 match=None, force=False, force_editor=False,
742 742 p1=None, p2=None, extra={}, empty_ok=False):
743 743 wlock = lock = None
744 744 if files:
745 745 files = util.unique(files)
746 746 try:
747 747 wlock = self.wlock()
748 748 lock = self.lock()
749 749 use_dirstate = (p1 is None) # not rawcommit
750 750
751 751 if use_dirstate:
752 752 p1, p2 = self.dirstate.parents()
753 753 update_dirstate = True
754 754
755 755 if (not force and p2 != nullid and
756 756 (match and (match.files() or match.anypats()))):
757 757 raise util.Abort(_('cannot partially commit a merge '
758 758 '(do not specify files or patterns)'))
759 759
760 760 if files:
761 761 modified, removed = [], []
762 762 for f in files:
763 763 s = self.dirstate[f]
764 764 if s in 'nma':
765 765 modified.append(f)
766 766 elif s == 'r':
767 767 removed.append(f)
768 768 else:
769 769 self.ui.warn(_("%s not tracked!\n") % f)
770 770 changes = [modified, [], removed, [], []]
771 771 else:
772 772 changes = self.status(match=match)
773 773 else:
774 774 p1, p2 = p1, p2 or nullid
775 775 update_dirstate = (self.dirstate.parents()[0] == p1)
776 776 changes = [files, [], [], [], []]
777 777
778 778 ms = merge_.mergestate(self)
779 779 for f in changes[0]:
780 780 if f in ms and ms[f] == 'u':
781 781 raise util.Abort(_("unresolved merge conflicts "
782 782 "(see hg resolve)"))
783 783 wctx = context.workingctx(self, (p1, p2), text, user, date,
784 784 extra, changes)
785 785 return self._commitctx(wctx, force, force_editor, empty_ok,
786 786 use_dirstate, update_dirstate)
787 787 finally:
788 788 del lock, wlock
789 789
790 790 def commitctx(self, ctx):
791 791 wlock = lock = None
792 792 try:
793 793 wlock = self.wlock()
794 794 lock = self.lock()
795 795 return self._commitctx(ctx, force=True, force_editor=False,
796 796 empty_ok=True, use_dirstate=False,
797 797 update_dirstate=False)
798 798 finally:
799 799 del lock, wlock
800 800
801 801 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
802 802 use_dirstate=True, update_dirstate=True):
803 803 tr = None
804 804 valid = 0 # don't save the dirstate if this isn't set
805 805 try:
806 806 commit = util.sort(wctx.modified() + wctx.added())
807 807 remove = wctx.removed()
808 808 extra = wctx.extra().copy()
809 809 branchname = extra['branch']
810 810 user = wctx.user()
811 811 text = wctx.description()
812 812
813 813 p1, p2 = [p.node() for p in wctx.parents()]
814 814 c1 = self.changelog.read(p1)
815 815 c2 = self.changelog.read(p2)
816 816 m1 = self.manifest.read(c1[0]).copy()
817 817 m2 = self.manifest.read(c2[0])
818 818
819 819 if use_dirstate:
820 820 oldname = c1[5].get("branch") # stored in UTF-8
821 821 if (not commit and not remove and not force and p2 == nullid
822 822 and branchname == oldname):
823 823 self.ui.status(_("nothing changed\n"))
824 824 return None
825 825
826 826 xp1 = hex(p1)
827 827 if p2 == nullid: xp2 = ''
828 828 else: xp2 = hex(p2)
829 829
830 830 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
831 831
832 832 tr = self.transaction()
833 833 trp = weakref.proxy(tr)
834 834
835 835 # check in files
836 836 new = {}
837 837 changed = []
838 838 linkrev = len(self)
839 839 for f in commit:
840 840 self.ui.note(f + "\n")
841 841 try:
842 842 fctx = wctx.filectx(f)
843 843 newflags = fctx.flags()
844 844 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
845 845 if ((not changed or changed[-1] != f) and
846 846 m2.get(f) != new[f]):
847 847 # mention the file in the changelog if some
848 848 # flag changed, even if there was no content
849 849 # change.
850 850 if m1.flags(f) != newflags:
851 851 changed.append(f)
852 852 m1.set(f, newflags)
853 853 if use_dirstate:
854 854 self.dirstate.normal(f)
855 855
856 856 except (OSError, IOError):
857 857 if use_dirstate:
858 858 self.ui.warn(_("trouble committing %s!\n") % f)
859 859 raise
860 860 else:
861 861 remove.append(f)
862 862
863 updated, added = [], []
864 for f in util.sort(changed):
865 if f in m1 or f in m2:
866 updated.append(f)
867 else:
868 added.append(f)
869
863 870 # update manifest
864 871 m1.update(new)
865 872 removed = []
866 873
867 874 for f in util.sort(remove):
868 875 if f in m1:
869 876 del m1[f]
870 877 removed.append(f)
871 878 elif f in m2:
872 879 removed.append(f)
873 880 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
874 881 (new, removed))
875 882
876 883 # add changeset
877 884 if (not empty_ok and not text) or force_editor:
878 885 edittext = []
879 886 if text:
880 887 edittext.append(text)
881 888 edittext.append("")
882 889 edittext.append("") # Empty line between message and comments.
883 890 edittext.append(_("HG: Enter commit message."
884 891 " Lines beginning with 'HG:' are removed."))
885 892 edittext.append("HG: --")
886 893 edittext.append("HG: user: %s" % user)
887 894 if p2 != nullid:
888 895 edittext.append("HG: branch merge")
889 896 if branchname:
890 897 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
891 edittext.extend(["HG: changed %s" % f for f in changed])
898 edittext.extend(["HG: added %s" % f for f in added])
899 edittext.extend(["HG: changed %s" % f for f in updated])
892 900 edittext.extend(["HG: removed %s" % f for f in removed])
893 if not changed and not remove:
901 if not added and not updated and not removed:
894 902 edittext.append("HG: no files changed")
895 903 edittext.append("")
896 904 # run editor in the repository root
897 905 olddir = os.getcwd()
898 906 os.chdir(self.root)
899 907 text = self.ui.edit("\n".join(edittext), user)
900 908 os.chdir(olddir)
901 909
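            # With the added/updated split above, the editor template now
            # distinguishes new files, e.g. for a hypothetical commit
            # (the point of issue 1330):
            #
            #   HG: added newfile.py
            #   HG: changed modified.py
            #   HG: removed dropped.py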
902 910 lines = [line.rstrip() for line in text.rstrip().splitlines()]
903 911 while lines and not lines[0]:
904 912 del lines[0]
905 913 if not lines and use_dirstate:
906 914 raise util.Abort(_("empty commit message"))
907 915 text = '\n'.join(lines)
908 916
909 917 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
910 918 user, wctx.date(), extra)
911 919 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
912 920 parent2=xp2)
913 921 tr.close()
914 922
915 923 if self.branchcache:
916 924 self.branchtags()
917 925
918 926 if use_dirstate or update_dirstate:
919 927 self.dirstate.setparents(n)
920 928 if use_dirstate:
921 929 for f in removed:
922 930 self.dirstate.forget(f)
923 931 valid = 1 # our dirstate updates are complete
924 932
925 933 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
926 934 return n
927 935 finally:
928 936 if not valid: # don't save our updated dirstate
929 937 self.dirstate.invalidate()
930 938 del tr
931 939
932 940 def walk(self, match, node=None):
933 941 '''
934 942 walk recursively through the directory tree or a given
935 943 changeset, finding all files matched by the match
936 944 function
937 945 '''
938 946 return self[node].walk(match)
939 947
940 948 def status(self, node1='.', node2=None, match=None,
941 949 ignored=False, clean=False, unknown=False):
942 950 """return status of files between two nodes or node and working directory
943 951
944 952 If node1 is None, use the first dirstate parent instead.
945 953 If node2 is None, compare node1 with working directory.
946 954 """
947 955
948 956 def mfmatches(ctx):
949 957 mf = ctx.manifest().copy()
950 958 for fn in mf.keys():
951 959 if not match(fn):
952 960 del mf[fn]
953 961 return mf
954 962
955 963 ctx1 = self[node1]
956 964 ctx2 = self[node2]
957 965 working = ctx2 == self[None]
958 966 parentworking = working and ctx1 == self['.']
959 967 match = match or match_.always(self.root, self.getcwd())
960 968 listignored, listclean, listunknown = ignored, clean, unknown
961 969
962 970 if not parentworking:
963 971 def bad(f, msg):
964 972 if f not in ctx1:
965 973 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
966 974 return False
967 975 match.bad = bad
968 976
969 977 if working: # we need to scan the working dir
970 978 s = self.dirstate.status(match, listignored, listclean, listunknown)
971 979 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
972 980
973 981 # check for any possibly clean files
974 982 if parentworking and cmp:
975 983 fixup = []
976 984 # do a full compare of any files that might have changed
977 985 for f in cmp:
978 986 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
979 987 or ctx1[f].cmp(ctx2[f].data())):
980 988 modified.append(f)
981 989 else:
982 990 fixup.append(f)
983 991
984 992 if listclean:
985 993 clean += fixup
986 994
987 995 # update dirstate for files that are actually clean
988 996 if fixup:
989 997 wlock = None
990 998 try:
991 999 try:
992 1000 wlock = self.wlock(False)
993 1001 for f in fixup:
994 1002 self.dirstate.normal(f)
995 1003 except lock.LockException:
996 1004 pass
997 1005 finally:
998 1006 del wlock
999 1007
1000 1008 if not parentworking:
1001 1009 mf1 = mfmatches(ctx1)
1002 1010 if working:
1003 1011 # we are comparing working dir against non-parent
1004 1012 # generate a pseudo-manifest for the working dir
1005 1013 mf2 = mfmatches(self['.'])
1006 1014 for f in cmp + modified + added:
1007 1015 mf2[f] = None
1008 1016 mf2.set(f, ctx2.flags(f))
1009 1017 for f in removed:
1010 1018 if f in mf2:
1011 1019 del mf2[f]
1012 1020 else:
1013 1021 # we are comparing two revisions
1014 1022 deleted, unknown, ignored = [], [], []
1015 1023 mf2 = mfmatches(ctx2)
1016 1024
1017 1025 modified, added, clean = [], [], []
1018 1026 for fn in mf2:
1019 1027 if fn in mf1:
1020 1028 if (mf1.flags(fn) != mf2.flags(fn) or
1021 1029 (mf1[fn] != mf2[fn] and
1022 1030 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1023 1031 modified.append(fn)
1024 1032 elif listclean:
1025 1033 clean.append(fn)
1026 1034 del mf1[fn]
1027 1035 else:
1028 1036 added.append(fn)
1029 1037 removed = mf1.keys()
1030 1038
1031 1039 r = modified, added, removed, deleted, unknown, ignored, clean
1032 1040 [l.sort() for l in r]
1033 1041 return r
1034 1042
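    # Unpacking sketch for the tuple returned above (the order is fixed):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)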
1035 1043 def add(self, list):
1036 1044 wlock = self.wlock()
1037 1045 try:
1038 1046 rejected = []
1039 1047 for f in list:
1040 1048 p = self.wjoin(f)
1041 1049 try:
1042 1050 st = os.lstat(p)
1043 1051 except:
1044 1052 self.ui.warn(_("%s does not exist!\n") % f)
1045 1053 rejected.append(f)
1046 1054 continue
1047 1055 if st.st_size > 10000000:
1048 1056 self.ui.warn(_("%s: files over 10MB may cause memory and"
1049 1057 " performance problems\n"
1050 1058 "(use 'hg revert %s' to unadd the file)\n")
1051 1059 % (f, f))
1052 1060 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1053 1061 self.ui.warn(_("%s not added: only files and symlinks "
1054 1062 "supported currently\n") % f)
1055 1063 rejected.append(p)
1056 1064 elif self.dirstate[f] in 'amn':
1057 1065 self.ui.warn(_("%s already tracked!\n") % f)
1058 1066 elif self.dirstate[f] == 'r':
1059 1067 self.dirstate.normallookup(f)
1060 1068 else:
1061 1069 self.dirstate.add(f)
1062 1070 return rejected
1063 1071 finally:
1064 1072 del wlock
1065 1073
1066 1074 def forget(self, list):
1067 1075 wlock = self.wlock()
1068 1076 try:
1069 1077 for f in list:
1070 1078 if self.dirstate[f] != 'a':
1071 1079 self.ui.warn(_("%s not added!\n") % f)
1072 1080 else:
1073 1081 self.dirstate.forget(f)
1074 1082 finally:
1075 1083 del wlock
1076 1084
1077 1085 def remove(self, list, unlink=False):
1078 1086 wlock = None
1079 1087 try:
1080 1088 if unlink:
1081 1089 for f in list:
1082 1090 try:
1083 1091 util.unlink(self.wjoin(f))
1084 1092 except OSError, inst:
1085 1093 if inst.errno != errno.ENOENT:
1086 1094 raise
1087 1095 wlock = self.wlock()
1088 1096 for f in list:
1089 1097 if unlink and os.path.exists(self.wjoin(f)):
1090 1098 self.ui.warn(_("%s still exists!\n") % f)
1091 1099 elif self.dirstate[f] == 'a':
1092 1100 self.dirstate.forget(f)
1093 1101 elif f not in self.dirstate:
1094 1102 self.ui.warn(_("%s not tracked!\n") % f)
1095 1103 else:
1096 1104 self.dirstate.remove(f)
1097 1105 finally:
1098 1106 del wlock
1099 1107
1100 1108 def undelete(self, list):
1101 1109 wlock = None
1102 1110 try:
1103 1111 manifests = [self.manifest.read(self.changelog.read(p)[0])
1104 1112 for p in self.dirstate.parents() if p != nullid]
1105 1113 wlock = self.wlock()
1106 1114 for f in list:
1107 1115 if self.dirstate[f] != 'r':
1108 1116 self.ui.warn(_("%s not removed!\n") % f)
1109 1117 else:
1110 1118 m = f in manifests[0] and manifests[0] or manifests[1]
1111 1119 t = self.file(f).read(m[f])
1112 1120 self.wwrite(f, t, m.flags(f))
1113 1121 self.dirstate.normal(f)
1114 1122 finally:
1115 1123 del wlock
1116 1124
1117 1125 def copy(self, source, dest):
1118 1126 wlock = None
1119 1127 try:
1120 1128 p = self.wjoin(dest)
1121 1129 if not (os.path.exists(p) or os.path.islink(p)):
1122 1130 self.ui.warn(_("%s does not exist!\n") % dest)
1123 1131 elif not (os.path.isfile(p) or os.path.islink(p)):
1124 1132 self.ui.warn(_("copy failed: %s is not a file or a "
1125 1133 "symbolic link\n") % dest)
1126 1134 else:
1127 1135 wlock = self.wlock()
1128 1136 if dest not in self.dirstate:
1129 1137 self.dirstate.add(dest)
1130 1138 self.dirstate.copy(source, dest)
1131 1139 finally:
1132 1140 del wlock
1133 1141
1134 1142 def heads(self, start=None):
1135 1143 heads = self.changelog.heads(start)
1136 1144 # sort the output in rev descending order
1137 1145 heads = [(-self.changelog.rev(h), h) for h in heads]
1138 1146 return [n for (r, n) in util.sort(heads)]
1139 1147
1140 1148 def branchheads(self, branch=None, start=None):
1141 1149 if branch is None:
1142 1150 branch = self[None].branch()
1143 1151 branches = self.branchtags()
1144 1152 if branch not in branches:
1145 1153 return []
1146 1154 # The basic algorithm is this:
1147 1155 #
1148 1156 # Start from the branch tip since there are no later revisions that can
1149 1157 # possibly be in this branch, and the tip is a guaranteed head.
1150 1158 #
1151 1159 # Remember the tip's parents as the first ancestors, since these by
1152 1160 # definition are not heads.
1153 1161 #
1154 1162         # Step backwards from the branch tip through all the revisions. We are
1155 1163 # guaranteed by the rules of Mercurial that we will now be visiting the
1156 1164 # nodes in reverse topological order (children before parents).
1157 1165 #
1158 1166 # If a revision is one of the ancestors of a head then we can toss it
1159 1167 # out of the ancestors set (we've already found it and won't be
1160 1168 # visiting it again) and put its parents in the ancestors set.
1161 1169 #
1162 1170 # Otherwise, if a revision is in the branch it's another head, since it
1163 1171 # wasn't in the ancestor list of an existing head. So add it to the
1164 1172 # head list, and add its parents to the ancestor list.
1165 1173 #
1166 1174 # If it is not in the branch ignore it.
1167 1175 #
1168 1176 # Once we have a list of heads, use nodesbetween to filter out all the
1169 1177 # heads that cannot be reached from startrev. There may be a more
1170 1178 # efficient way to do this as part of the previous algorithm.
1171 1179
1172 1180 set = util.set
1173 1181 heads = [self.changelog.rev(branches[branch])]
1174 1182 # Don't care if ancestors contains nullrev or not.
1175 1183 ancestors = set(self.changelog.parentrevs(heads[0]))
1176 1184 for rev in xrange(heads[0] - 1, nullrev, -1):
1177 1185 if rev in ancestors:
1178 1186 ancestors.update(self.changelog.parentrevs(rev))
1179 1187 ancestors.remove(rev)
1180 1188 elif self[rev].branch() == branch:
1181 1189 heads.append(rev)
1182 1190 ancestors.update(self.changelog.parentrevs(rev))
1183 1191 heads = [self.changelog.node(rev) for rev in heads]
1184 1192 if start is not None:
1185 1193 heads = self.changelog.nodesbetween([start], heads)[2]
1186 1194 return heads
1187 1195
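    # Worked example for the walk above, on a hypothetical DAG where revs
    # 0-1-3 form one line and rev 2 is another child of 1, all on "default":
    # start at tip 3 and remember its parent 1 as an ancestor; rev 2 is not
    # in the ancestor set and is on the branch, so it becomes a second head;
    # rev 1 is in the set, so it is dropped and its parents take its place.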
1188 1196 def branches(self, nodes):
1189 1197 if not nodes:
1190 1198 nodes = [self.changelog.tip()]
1191 1199 b = []
1192 1200 for n in nodes:
1193 1201 t = n
1194 1202 while 1:
1195 1203 p = self.changelog.parents(n)
1196 1204 if p[1] != nullid or p[0] == nullid:
1197 1205 b.append((t, n, p[0], p[1]))
1198 1206 break
1199 1207 n = p[0]
1200 1208 return b
1201 1209
1202 1210 def between(self, pairs):
1203 1211 r = []
1204 1212
1205 1213 for top, bottom in pairs:
1206 1214 n, l, i = top, [], 0
1207 1215 f = 1
1208 1216
1209 1217 while n != bottom:
1210 1218 p = self.changelog.parents(n)[0]
1211 1219 if i == f:
1212 1220 l.append(n)
1213 1221 f = f * 2
1214 1222 n = p
1215 1223 i += 1
1216 1224
1217 1225 r.append(l)
1218 1226
1219 1227 return r
1220 1228
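    # Sampling sketch: for each (top, bottom) pair the list l collects the
    # nodes 1, 2, 4, 8, ... first-parent steps below top, letting callers
    # binary-search a linear range with O(log n) round trips.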
1221 1229 def findincoming(self, remote, base=None, heads=None, force=False):
1222 1230 """Return list of roots of the subsets of missing nodes from remote
1223 1231
1224 1232 If base dict is specified, assume that these nodes and their parents
1225 1233 exist on the remote side and that no child of a node of base exists
1226 1234 in both remote and self.
1227 1235 Furthermore base will be updated to include the nodes that exists
1228 1236 in self and remote but no children exists in self and remote.
1229 1237 If a list of heads is specified, return only nodes which are heads
1230 1238 or ancestors of these heads.
1231 1239
1232 1240 All the ancestors of base are in self and in remote.
1233 1241 All the descendants of the list returned are missing in self.
1234 1242 (and so we know that the rest of the nodes are missing in remote, see
1235 1243 outgoing)
1236 1244 """
1237 1245 m = self.changelog.nodemap
1238 1246 search = []
1239 1247 fetch = {}
1240 1248 seen = {}
1241 1249 seenbranch = {}
1242 1250 if base == None:
1243 1251 base = {}
1244 1252
1245 1253 if not heads:
1246 1254 heads = remote.heads()
1247 1255
1248 1256 if self.changelog.tip() == nullid:
1249 1257 base[nullid] = 1
1250 1258 if heads != [nullid]:
1251 1259 return [nullid]
1252 1260 return []
1253 1261
1254 1262 # assume we're closer to the tip than the root
1255 1263 # and start by examining the heads
1256 1264 self.ui.status(_("searching for changes\n"))
1257 1265
1258 1266 unknown = []
1259 1267 for h in heads:
1260 1268 if h not in m:
1261 1269 unknown.append(h)
1262 1270 else:
1263 1271 base[h] = 1
1264 1272
1265 1273 if not unknown:
1266 1274 return []
1267 1275
1268 1276 req = dict.fromkeys(unknown)
1269 1277 reqcnt = 0
1270 1278
1271 1279 # search through remote branches
1272 1280 # a 'branch' here is a linear segment of history, with four parts:
1273 1281 # head, root, first parent, second parent
1274 1282 # (a branch always has two parents (or none) by definition)
1275 1283 unknown = remote.branches(unknown)
1276 1284 while unknown:
1277 1285 r = []
1278 1286 while unknown:
1279 1287 n = unknown.pop(0)
1280 1288 if n[0] in seen:
1281 1289 continue
1282 1290
1283 1291 self.ui.debug(_("examining %s:%s\n")
1284 1292 % (short(n[0]), short(n[1])))
1285 1293 if n[0] == nullid: # found the end of the branch
1286 1294 pass
1287 1295 elif n in seenbranch:
1288 1296 self.ui.debug(_("branch already found\n"))
1289 1297 continue
1290 1298 elif n[1] and n[1] in m: # do we know the base?
1291 1299 self.ui.debug(_("found incomplete branch %s:%s\n")
1292 1300 % (short(n[0]), short(n[1])))
1293 1301 search.append(n) # schedule branch range for scanning
1294 1302 seenbranch[n] = 1
1295 1303 else:
1296 1304 if n[1] not in seen and n[1] not in fetch:
1297 1305 if n[2] in m and n[3] in m:
1298 1306 self.ui.debug(_("found new changeset %s\n") %
1299 1307 short(n[1]))
1300 1308 fetch[n[1]] = 1 # earliest unknown
1301 1309 for p in n[2:4]:
1302 1310 if p in m:
1303 1311 base[p] = 1 # latest known
1304 1312
1305 1313 for p in n[2:4]:
1306 1314 if p not in req and p not in m:
1307 1315 r.append(p)
1308 1316 req[p] = 1
1309 1317 seen[n[0]] = 1
1310 1318
1311 1319 if r:
1312 1320 reqcnt += 1
1313 1321 self.ui.debug(_("request %d: %s\n") %
1314 1322 (reqcnt, " ".join(map(short, r))))
1315 1323 for p in xrange(0, len(r), 10):
1316 1324 for b in remote.branches(r[p:p+10]):
1317 1325 self.ui.debug(_("received %s:%s\n") %
1318 1326 (short(b[0]), short(b[1])))
1319 1327 unknown.append(b)
1320 1328
1321 1329 # do binary search on the branches we found
1322 1330 while search:
1323 1331 n = search.pop(0)
1324 1332 reqcnt += 1
1325 1333 l = remote.between([(n[0], n[1])])[0]
1326 1334 l.append(n[1])
1327 1335 p = n[0]
1328 1336 f = 1
1329 1337 for i in l:
1330 1338 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1331 1339 if i in m:
1332 1340 if f <= 2:
1333 1341 self.ui.debug(_("found new branch changeset %s\n") %
1334 1342 short(p))
1335 1343 fetch[p] = 1
1336 1344 base[i] = 1
1337 1345 else:
1338 1346 self.ui.debug(_("narrowed branch search to %s:%s\n")
1339 1347 % (short(p), short(i)))
1340 1348 search.append((p, i))
1341 1349 break
1342 1350 p, f = i, f * 2
1343 1351
1344 1352 # sanity check our fetch list
1345 1353 for f in fetch.keys():
1346 1354 if f in m:
1347 1355 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1348 1356
1349 1357 if base.keys() == [nullid]:
1350 1358 if force:
1351 1359 self.ui.warn(_("warning: repository is unrelated\n"))
1352 1360 else:
1353 1361 raise util.Abort(_("repository is unrelated"))
1354 1362
1355 1363 self.ui.debug(_("found new changesets starting at ") +
1356 1364 " ".join([short(f) for f in fetch]) + "\n")
1357 1365
1358 1366 self.ui.debug(_("%d total queries\n") % reqcnt)
1359 1367
1360 1368 return fetch.keys()
1361 1369
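    # Typical discovery round trip, sketched ("remote" is a repo proxy):
    #
    #   base = {}
    #   fetch = self.findincoming(remote, base)  # roots missing locally
    #   # base now maps nodes known to both sides to 1; fetch holds the
    #   # earliest unknown nodes to request in a changegroup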
1362 1370 def findoutgoing(self, remote, base=None, heads=None, force=False):
1363 1371 """Return list of nodes that are roots of subsets not in remote
1364 1372
1365 1373 If base dict is specified, assume that these nodes and their parents
1366 1374 exist on the remote side.
1367 1375 If a list of heads is specified, return only nodes which are heads
1368 1376 or ancestors of these heads, and return a second element which
1369 1377 contains all remote heads which get new children.
1370 1378 """
1371 1379 if base == None:
1372 1380 base = {}
1373 1381 self.findincoming(remote, base, heads, force=force)
1374 1382
1375 1383 self.ui.debug(_("common changesets up to ")
1376 1384 + " ".join(map(short, base.keys())) + "\n")
1377 1385
1378 1386 remain = dict.fromkeys(self.changelog.nodemap)
1379 1387
1380 1388 # prune everything remote has from the tree
1381 1389 del remain[nullid]
1382 1390 remove = base.keys()
1383 1391 while remove:
1384 1392 n = remove.pop(0)
1385 1393 if n in remain:
1386 1394 del remain[n]
1387 1395 for p in self.changelog.parents(n):
1388 1396 remove.append(p)
1389 1397
1390 1398 # find every node whose parents have been pruned
1391 1399 subset = []
1392 1400 # find every remote head that will get new children
1393 1401 updated_heads = {}
1394 1402 for n in remain:
1395 1403 p1, p2 = self.changelog.parents(n)
1396 1404 if p1 not in remain and p2 not in remain:
1397 1405 subset.append(n)
1398 1406 if heads:
1399 1407 if p1 in heads:
1400 1408 updated_heads[p1] = True
1401 1409 if p2 in heads:
1402 1410 updated_heads[p2] = True
1403 1411
1404 1412 # this is the set of all roots we have to push
1405 1413 if heads:
1406 1414 return subset, updated_heads.keys()
1407 1415 else:
1408 1416 return subset
1409 1417
1410 1418 def pull(self, remote, heads=None, force=False):
1411 1419 lock = self.lock()
1412 1420 try:
1413 1421 fetch = self.findincoming(remote, heads=heads, force=force)
1414 1422 if fetch == [nullid]:
1415 1423 self.ui.status(_("requesting all changes\n"))
1416 1424
1417 1425 if not fetch:
1418 1426 self.ui.status(_("no changes found\n"))
1419 1427 return 0
1420 1428
1421 1429 if heads is None:
1422 1430 cg = remote.changegroup(fetch, 'pull')
1423 1431 else:
1424 1432 if 'changegroupsubset' not in remote.capabilities:
1425 1433 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1426 1434 cg = remote.changegroupsubset(fetch, heads, 'pull')
1427 1435 return self.addchangegroup(cg, 'pull', remote.url())
1428 1436 finally:
1429 1437 del lock
1430 1438
1431 1439 def push(self, remote, force=False, revs=None):
1432 1440 # there are two ways to push to remote repo:
1433 1441 #
1434 1442 # addchangegroup assumes local user can lock remote
1435 1443 # repo (local filesystem, old ssh servers).
1436 1444 #
1437 1445 # unbundle assumes local user cannot lock remote repo (new ssh
1438 1446 # servers, http servers).
1439 1447
1440 1448 if remote.capable('unbundle'):
1441 1449 return self.push_unbundle(remote, force, revs)
1442 1450 return self.push_addchangegroup(remote, force, revs)
1443 1451
1444 1452 def prepush(self, remote, force, revs):
1445 1453 base = {}
1446 1454 remote_heads = remote.heads()
1447 1455 inc = self.findincoming(remote, base, remote_heads, force=force)
1448 1456
1449 1457 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1450 1458 if revs is not None:
1451 1459 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1452 1460 else:
1453 1461 bases, heads = update, self.changelog.heads()
1454 1462
1455 1463 if not bases:
1456 1464 self.ui.status(_("no changes found\n"))
1457 1465 return None, 1
1458 1466 elif not force:
1459 1467 # check if we're creating new remote heads
1460 1468 # to be a remote head after push, node must be either
1461 1469 # - unknown locally
1462 1470 # - a local outgoing head descended from update
1463 1471 # - a remote head that's known locally and not
1464 1472 # ancestral to an outgoing head
1465 1473
1466 1474 warn = 0
1467 1475
1468 1476 if remote_heads == [nullid]:
1469 1477 warn = 0
1470 1478 elif not revs and len(heads) > len(remote_heads):
1471 1479 warn = 1
1472 1480 else:
1473 1481 newheads = list(heads)
1474 1482 for r in remote_heads:
1475 1483 if r in self.changelog.nodemap:
1476 1484 desc = self.changelog.heads(r, heads)
1477 1485 l = [h for h in heads if h in desc]
1478 1486 if not l:
1479 1487 newheads.append(r)
1480 1488 else:
1481 1489 newheads.append(r)
1482 1490 if len(newheads) > len(remote_heads):
1483 1491 warn = 1
1484 1492
1485 1493 if warn:
1486 1494 self.ui.warn(_("abort: push creates new remote heads!\n"))
1487 1495 self.ui.status(_("(did you forget to merge?"
1488 1496 " use push -f to force)\n"))
1489 1497 return None, 0
1490 1498 elif inc:
1491 1499 self.ui.warn(_("note: unsynced remote changes!\n"))
1492 1500
1493 1501
1494 1502 if revs is None:
1495 1503 cg = self.changegroup(update, 'push')
1496 1504 else:
1497 1505 cg = self.changegroupsubset(update, revs, 'push')
1498 1506 return cg, remote_heads
1499 1507
1500 1508 def push_addchangegroup(self, remote, force, revs):
1501 1509 lock = remote.lock()
1502 1510 try:
1503 1511 ret = self.prepush(remote, force, revs)
1504 1512 if ret[0] is not None:
1505 1513 cg, remote_heads = ret
1506 1514 return remote.addchangegroup(cg, 'push', self.url())
1507 1515 return ret[1]
1508 1516 finally:
1509 1517 del lock
1510 1518
1511 1519 def push_unbundle(self, remote, force, revs):
1512 1520 # local repo finds heads on server, finds out what revs it
1513 1521 # must push. once revs transferred, if server finds it has
1514 1522 # different heads (someone else won commit/push race), server
1515 1523 # aborts.
1516 1524
1517 1525 ret = self.prepush(remote, force, revs)
1518 1526 if ret[0] is not None:
1519 1527 cg, remote_heads = ret
1520 1528 if force: remote_heads = ['force']
1521 1529 return remote.unbundle(cg, remote_heads, 'push')
1522 1530 return ret[1]
1523 1531
1524 1532 def changegroupinfo(self, nodes, source):
1525 1533 if self.ui.verbose or source == 'bundle':
1526 1534 self.ui.status(_("%d changesets found\n") % len(nodes))
1527 1535 if self.ui.debugflag:
1528 1536 self.ui.debug(_("List of changesets:\n"))
1529 1537 for node in nodes:
1530 1538 self.ui.debug("%s\n" % hex(node))
1531 1539
1532 1540 def changegroupsubset(self, bases, heads, source, extranodes=None):
1533 1541 """This function generates a changegroup consisting of all the nodes
1534 1542         that are descendants of any of the bases, and ancestors of any of
1535 1543 the heads.
1536 1544
1537 1545 It is fairly complex as determining which filenodes and which
1538 1546 manifest nodes need to be included for the changeset to be complete
1539 1547 is non-trivial.
1540 1548
1541 1549 Another wrinkle is doing the reverse, figuring out which changeset in
1542 1550 the changegroup a particular filenode or manifestnode belongs to.
1543 1551
1544 1552 The caller can specify some nodes that must be included in the
1545 1553 changegroup using the extranodes argument. It should be a dict
1546 1554 where the keys are the filenames (or 1 for the manifest), and the
1547 1555 values are lists of (node, linknode) tuples, where node is a wanted
1548 1556 node and linknode is the changelog node that should be transmitted as
1549 1557 the linkrev.
1550 1558 """
1551 1559
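        # Shape sketch for the extranodes argument described above
        # (values are hypothetical):
        #
        #   extranodes = {
        #       'foo.c': [(filenode, linknode)],
        #       1: [(manifestnode, linknode)],  # key 1 means the manifest
        #   }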
1552 1560 self.hook('preoutgoing', throw=True, source=source)
1553 1561
1554 1562 # Set up some initial variables
1555 1563 # Make it easy to refer to self.changelog
1556 1564 cl = self.changelog
1557 1565 # msng is short for missing - compute the list of changesets in this
1558 1566 # changegroup.
1559 1567 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1560 1568 self.changegroupinfo(msng_cl_lst, source)
1561 1569 # Some bases may turn out to be superfluous, and some heads may be
1562 1570 # too. nodesbetween will return the minimal set of bases and heads
1563 1571 # necessary to re-create the changegroup.
1564 1572
1565 1573 # Known heads are the list of heads that it is assumed the recipient
1566 1574 # of this changegroup will know about.
1567 1575 knownheads = {}
1568 1576 # We assume that all parents of bases are known heads.
1569 1577 for n in bases:
1570 1578 for p in cl.parents(n):
1571 1579 if p != nullid:
1572 1580 knownheads[p] = 1
1573 1581 knownheads = knownheads.keys()
1574 1582 if knownheads:
1575 1583 # Now that we know what heads are known, we can compute which
1576 1584 # changesets are known. The recipient must know about all
1577 1585 # changesets required to reach the known heads from the null
1578 1586 # changeset.
1579 1587 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1580 1588 junk = None
1581 1589 # Transform the list into an ersatz set.
1582 1590 has_cl_set = dict.fromkeys(has_cl_set)
1583 1591 else:
1584 1592 # If there were no known heads, the recipient cannot be assumed to
1585 1593 # know about any changesets.
1586 1594 has_cl_set = {}
1587 1595
1588 1596 # Make it easy to refer to self.manifest
1589 1597 mnfst = self.manifest
1590 1598 # We don't know which manifests are missing yet
1591 1599 msng_mnfst_set = {}
1592 1600 # Nor do we know which filenodes are missing.
1593 1601 msng_filenode_set = {}
1594 1602
1595 1603 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1596 1604 junk = None
1597 1605
1598 1606 # A changeset always belongs to itself, so the changenode lookup
1599 1607 # function for a changenode is identity.
1600 1608 def identity(x):
1601 1609 return x
1602 1610
1603 1611 # A function generating function. Sets up an environment for the
1604 1612 # inner function.
1605 1613 def cmp_by_rev_func(revlog):
1606 1614 # Compare two nodes by their revision number in the environment's
1607 1615 # revision history. Since the revision number both represents the
1608 1616 # most efficient order to read the nodes in, and represents a
1609 1617 # topological sorting of the nodes, this function is often useful.
1610 1618 def cmp_by_rev(a, b):
1611 1619 return cmp(revlog.rev(a), revlog.rev(b))
1612 1620 return cmp_by_rev
1613 1621
1614 1622 # If we determine that a particular file or manifest node must be a
1615 1623 # node that the recipient of the changegroup will already have, we can
1616 1624 # also assume the recipient will have all the parents. This function
1617 1625 # prunes them from the set of missing nodes.
1618 1626 def prune_parents(revlog, hasset, msngset):
1619 1627 haslst = hasset.keys()
1620 1628 haslst.sort(cmp_by_rev_func(revlog))
1621 1629 for node in haslst:
1622 1630 parentlst = [p for p in revlog.parents(node) if p != nullid]
1623 1631 while parentlst:
1624 1632 n = parentlst.pop()
1625 1633 if n not in hasset:
1626 1634 hasset[n] = 1
1627 1635 p = [p for p in revlog.parents(n) if p != nullid]
1628 1636 parentlst.extend(p)
1629 1637 for n in hasset:
1630 1638 msngset.pop(n, None)
1631 1639
1632 1640 # This is a function generating function used to set up an environment
1633 1641 # for the inner function to execute in.
1634 1642 def manifest_and_file_collector(changedfileset):
1635 1643 # This function gathers information from each changeset node
1636 1644 # that goes out as part of the changegroup. The information
1637 1645 # gathered is the list of manifest nodes that are potentially
1638 1646 # required (the recipient may already have them) and the full
1639 1647 # list of all files which were changed in any changeset in the
1640 1648 # changegroup.
1641 1649 #
1642 1650 # We also remember the first changenode in which we saw each
1643 1651 # manifest referenced, so we can later determine which changenode
1644 1652 # 'owns' the manifest.
1645 1653 def collect_manifests_and_files(clnode):
1646 1654 c = cl.read(clnode)
1647 1655 for f in c[3]:
1648 1656 # This is to make sure we only have one instance of each
1649 1657 # filename string for each filename.
1650 1658 changedfileset.setdefault(f, f)
1651 1659 msng_mnfst_set.setdefault(c[0], clnode)
1652 1660 return collect_manifests_and_files
1653 1661
1654 1662 # Figure out which manifest nodes (of the ones we think might be part
1655 1663 # of the changegroup) the recipient must know about and remove them
1656 1664 # from the changegroup.
1657 1665 def prune_manifests():
1658 1666 has_mnfst_set = {}
1659 1667 for n in msng_mnfst_set:
1660 1668 # If a 'missing' manifest thinks it belongs to a changenode
1661 1669 # the recipient is assumed to have, obviously the recipient
1662 1670 # must have that manifest.
1663 1671 linknode = cl.node(mnfst.linkrev(n))
1664 1672 if linknode in has_cl_set:
1665 1673 has_mnfst_set[n] = 1
1666 1674 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1667 1675
1668 1676 # Use the information collected in collect_manifests_and_files to say
1669 1677 # which changenode any manifestnode belongs to.
1670 1678 def lookup_manifest_link(mnfstnode):
1671 1679 return msng_mnfst_set[mnfstnode]
1672 1680
1673 1681 # A function generating function that sets up the initial environment
1674 1682 # for the inner function.
1675 1683 def filenode_collector(changedfiles):
1676 1684 next_rev = [0]
1677 1685 # This gathers information from each manifestnode included in the
1678 1686 # changegroup about which filenodes the manifest node references
1679 1687 # so we can include those in the changegroup too.
1680 1688 #
1681 1689 # It also remembers which changenode each filenode belongs to. It
1682 1690 # does this by assuming a filenode belongs to the changenode
1683 1691 # that the first manifest referencing it belongs to.
1684 1692 def collect_msng_filenodes(mnfstnode):
1685 1693 r = mnfst.rev(mnfstnode)
1686 1694 if r == next_rev[0]:
1687 1695 # If the last rev we looked at was the one just before this,
1688 1696 # we only need to read a delta.
1689 1697 deltamf = mnfst.readdelta(mnfstnode)
1690 1698 # For each line in the delta
1691 1699 for f, fnode in deltamf.items():
1692 1700 f = changedfiles.get(f, None)
1693 1701 # And if the file is in the list of files we care
1694 1702 # about.
1695 1703 if f is not None:
1696 1704 # Get the changenode this manifest belongs to
1697 1705 clnode = msng_mnfst_set[mnfstnode]
1698 1706 # Create the set of filenodes for the file if
1699 1707 # there isn't one already.
1700 1708 ndset = msng_filenode_set.setdefault(f, {})
1701 1709 # And set the filenode's changelog node to the
1702 1710 # manifest's if it hasn't been set already.
1703 1711 ndset.setdefault(fnode, clnode)
1704 1712 else:
1705 1713 # Otherwise we need a full manifest.
1706 1714 m = mnfst.read(mnfstnode)
1707 1715 # For every file we care about.
1708 1716 for f in changedfiles:
1709 1717 fnode = m.get(f, None)
1710 1718 # If it's in the manifest
1711 1719 if fnode is not None:
1712 1720 # See comments above.
1713 1721 clnode = msng_mnfst_set[mnfstnode]
1714 1722 ndset = msng_filenode_set.setdefault(f, {})
1715 1723 ndset.setdefault(fnode, clnode)
1716 1724 # Remember the revision we hope to see next.
1717 1725 next_rev[0] = r + 1
1718 1726 return collect_msng_filenodes
1719 1727
1720 1728 # We have a list of filenodes we think we need for a file; let's
1721 1729 # remove all those we know the recipient must have.
1722 1730 def prune_filenodes(f, filerevlog):
1723 1731 msngset = msng_filenode_set[f]
1724 1732 hasset = {}
1725 1733 # If a 'missing' filenode thinks it belongs to a changenode we
1726 1734 # assume the recipient must have, then the recipient must have
1727 1735 # that filenode.
1728 1736 for n in msngset:
1729 1737 clnode = cl.node(filerevlog.linkrev(n))
1730 1738 if clnode in has_cl_set:
1731 1739 hasset[n] = 1
1732 1740 prune_parents(filerevlog, hasset, msngset)
1733 1741
1734 1742 # A function generating function that sets up a context for the
1735 1743 # inner function.
1736 1744 def lookup_filenode_link_func(fname):
1737 1745 msngset = msng_filenode_set[fname]
1738 1746 # Lookup the changenode the filenode belongs to.
1739 1747 def lookup_filenode_link(fnode):
1740 1748 return msngset[fnode]
1741 1749 return lookup_filenode_link
1742 1750
1743 1751 # Add the nodes that were explicitly requested.
1744 1752 def add_extra_nodes(name, nodes):
1745 1753 if not extranodes or name not in extranodes:
1746 1754 return
1747 1755
1748 1756 for node, linknode in extranodes[name]:
1749 1757 if node not in nodes:
1750 1758 nodes[node] = linknode
1751 1759
1752 1760 # Now that we have all these utility functions to help out and
1753 1761 # logically divide up the task, generate the group.
1754 1762 def gengroup():
1755 1763 # The set of changed files starts empty.
1756 1764 changedfiles = {}
1757 1765 # Create a changenode group generator that will call our functions
1758 1766 # back to lookup the owning changenode and collect information.
1759 1767 group = cl.group(msng_cl_lst, identity,
1760 1768 manifest_and_file_collector(changedfiles))
1761 1769 for chnk in group:
1762 1770 yield chnk
1763 1771
1764 1772 # The list of manifests has been collected by the generator
1765 1773 # calling our functions back.
1766 1774 prune_manifests()
1767 1775 add_extra_nodes(1, msng_mnfst_set)
1768 1776 msng_mnfst_lst = msng_mnfst_set.keys()
1769 1777 # Sort the manifestnodes by revision number.
1770 1778 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1771 1779 # Create a generator for the manifestnodes that calls our lookup
1772 1780 # and data collection functions back.
1773 1781 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1774 1782 filenode_collector(changedfiles))
1775 1783 for chnk in group:
1776 1784 yield chnk
1777 1785
1778 1786 # These are no longer needed, dereference and toss the memory for
1779 1787 # them.
1780 1788 msng_mnfst_lst = None
1781 1789 msng_mnfst_set.clear()
1782 1790
1783 1791 if extranodes:
1784 1792 for fname in extranodes:
1785 1793 if isinstance(fname, int):
1786 1794 continue
1787 1795 add_extra_nodes(fname,
1788 1796 msng_filenode_set.setdefault(fname, {}))
1789 1797 changedfiles[fname] = 1
1790 1798 # Go through all our files in order sorted by name.
1791 1799 for fname in util.sort(changedfiles):
1792 1800 filerevlog = self.file(fname)
1793 1801 if not len(filerevlog):
1794 1802 raise util.Abort(_("empty or missing revlog for %s") % fname)
1795 1803 # Toss out the filenodes that the recipient isn't really
1796 1804 # missing.
1797 1805 if fname in msng_filenode_set:
1798 1806 prune_filenodes(fname, filerevlog)
1799 1807 msng_filenode_lst = msng_filenode_set[fname].keys()
1800 1808 else:
1801 1809 msng_filenode_lst = []
1802 1810 # If any filenodes are left, generate the group for them,
1803 1811 # otherwise don't bother.
1804 1812 if len(msng_filenode_lst) > 0:
1805 1813 yield changegroup.chunkheader(len(fname))
1806 1814 yield fname
1807 1815 # Sort the filenodes by their revision number.
1808 1816 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1809 1817 # Create a group generator and only pass in a changenode
1810 1818 # lookup function, as we don't need to collect any information
1811 1819 # from filenodes.
1812 1820 group = filerevlog.group(msng_filenode_lst,
1813 1821 lookup_filenode_link_func(fname))
1814 1822 for chnk in group:
1815 1823 yield chnk
1816 1824 if fname in msng_filenode_set:
1817 1825 # Don't need this anymore, toss it to free memory.
1818 1826 del msng_filenode_set[fname]
1819 1827 # Signal that no more groups are left.
1820 1828 yield changegroup.closechunk()
1821 1829
1822 1830 if msng_cl_lst:
1823 1831 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1824 1832
1825 1833 return util.chunkbuffer(gengroup())
1826 1834
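
The pruning machinery above rests on one graph fact: once the recipient is assumed to know a head, it is assumed to know every ancestor of that head, so those nodes can be dropped from the "missing" sets. A minimal standalone sketch of that idea, using a handwritten parent map and string node names rather than a real revlog:

    def known_set(parents, knownheads):
        # everything reachable from a known head is assumed present
        has = set()
        stack = list(knownheads)
        while stack:
            n = stack.pop()
            if n not in has:
                has.add(n)
                stack.extend(parents.get(n, ()))
        return has

    parents = {'b': ['a'], 'c': ['b'], 'd': ['b']}  # tiny DAG: a <- b <- {c, d}
    missing = set('abcd')
    print(sorted(missing - known_set(parents, ['c'])))  # ['d']
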
1827 1835 def changegroup(self, basenodes, source):
1828 1836 """Generate a changegroup of all nodes that we have that a recipient
1829 1837 doesn't.
1830 1838
1831 1839 This is much easier than the previous function as we can assume that
1832 1840 the recipient already has any changenode we aren't sending."""
1833 1841
1834 1842 self.hook('preoutgoing', throw=True, source=source)
1835 1843
1836 1844 cl = self.changelog
1837 1845 nodes = cl.nodesbetween(basenodes, None)[0]
1838 1846 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1839 1847 self.changegroupinfo(nodes, source)
1840 1848
1841 1849 def identity(x):
1842 1850 return x
1843 1851
1844 1852 def gennodelst(log):
1845 1853 for r in log:
1846 1854 n = log.node(r)
1847 1855 if log.linkrev(n) in revset:
1848 1856 yield n
1849 1857
1850 1858 def changed_file_collector(changedfileset):
1851 1859 def collect_changed_files(clnode):
1852 1860 c = cl.read(clnode)
1853 1861 for fname in c[3]:
1854 1862 changedfileset[fname] = 1
1855 1863 return collect_changed_files
1856 1864
1857 1865 def lookuprevlink_func(revlog):
1858 1866 def lookuprevlink(n):
1859 1867 return cl.node(revlog.linkrev(n))
1860 1868 return lookuprevlink
1861 1869
1862 1870 def gengroup():
1863 1871 # construct a list of all changed files
1864 1872 changedfiles = {}
1865 1873
1866 1874 for chnk in cl.group(nodes, identity,
1867 1875 changed_file_collector(changedfiles)):
1868 1876 yield chnk
1869 1877
1870 1878 mnfst = self.manifest
1871 1879 nodeiter = gennodelst(mnfst)
1872 1880 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1873 1881 yield chnk
1874 1882
1875 1883 for fname in util.sort(changedfiles):
1876 1884 filerevlog = self.file(fname)
1877 1885 if not len(filerevlog):
1878 1886 raise util.Abort(_("empty or missing revlog for %s") % fname)
1879 1887 nodeiter = gennodelst(filerevlog)
1880 1888 nodeiter = list(nodeiter)
1881 1889 if nodeiter:
1882 1890 yield changegroup.chunkheader(len(fname))
1883 1891 yield fname
1884 1892 lookup = lookuprevlink_func(filerevlog)
1885 1893 for chnk in filerevlog.group(nodeiter, lookup):
1886 1894 yield chnk
1887 1895
1888 1896 yield changegroup.closechunk()
1889 1897
1890 1898 if nodes:
1891 1899 self.hook('outgoing', node=hex(nodes[0]), source=source)
1892 1900
1893 1901 return util.chunkbuffer(gengroup())
1894 1902
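
As the docstring notes, changegroup can assume the recipient has every changenode not being sent, so it needs none of changegroupsubset's manifest and filenode pruning. A hedged sketch of how a caller might choose between the two generators; the signatures follow this file, while repo, bases and heads are hypothetical placeholders supplied by discovery:

    def outgoing_bundle(repo, bases, heads, source='push'):
        # 'repo' is a localrepository; bases/heads are lists of binary
        # node ids (illustrative, not produced here)
        if heads is None:
            # recipient assumed to have everything we aren't sending:
            # the simpler generator is enough
            return repo.changegroup(bases, source)
        # general case: prune manifests and filenodes the recipient
        # can be proven to already have
        return repo.changegroupsubset(bases, heads, source)
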
1895 1903 def addchangegroup(self, source, srctype, url, emptyok=False):
1896 1904 """add changegroup to repo.
1897 1905
1898 1906 return values:
1899 1907 - nothing changed or no source: 0
1900 1908 - more heads than before: 1+added heads (2..n)
1901 1909 - fewer heads than before: -1-removed heads (-2..-n)
1902 1910 - number of heads stays the same: 1
1903 1911 """
1904 1912 def csmap(x):
1905 1913 self.ui.debug(_("add changeset %s\n") % short(x))
1906 1914 return len(cl)
1907 1915
1908 1916 def revmap(x):
1909 1917 return cl.rev(x)
1910 1918
1911 1919 if not source:
1912 1920 return 0
1913 1921
1914 1922 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1915 1923
1916 1924 changesets = files = revisions = 0
1917 1925
1918 1926 # write changelog data to temp files so concurrent readers will not see
1919 1927 # an inconsistent view
1920 1928 cl = self.changelog
1921 1929 cl.delayupdate()
1922 1930 oldheads = len(cl.heads())
1923 1931
1924 1932 tr = self.transaction()
1925 1933 try:
1926 1934 trp = weakref.proxy(tr)
1927 1935 # pull off the changeset group
1928 1936 self.ui.status(_("adding changesets\n"))
1929 1937 cor = len(cl) - 1
1930 1938 chunkiter = changegroup.chunkiter(source)
1931 1939 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1932 1940 raise util.Abort(_("received changelog group is empty"))
1933 1941 cnr = len(cl) - 1
1934 1942 changesets = cnr - cor
1935 1943
1936 1944 # pull off the manifest group
1937 1945 self.ui.status(_("adding manifests\n"))
1938 1946 chunkiter = changegroup.chunkiter(source)
1939 1947 # no need to check for empty manifest group here:
1940 1948 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1941 1949 # no new manifest will be created and the manifest group will
1942 1950 # be empty during the pull
1943 1951 self.manifest.addgroup(chunkiter, revmap, trp)
1944 1952
1945 1953 # process the files
1946 1954 self.ui.status(_("adding file changes\n"))
1947 1955 while 1:
1948 1956 f = changegroup.getchunk(source)
1949 1957 if not f:
1950 1958 break
1951 1959 self.ui.debug(_("adding %s revisions\n") % f)
1952 1960 fl = self.file(f)
1953 1961 o = len(fl)
1954 1962 chunkiter = changegroup.chunkiter(source)
1955 1963 if fl.addgroup(chunkiter, revmap, trp) is None:
1956 1964 raise util.Abort(_("received file revlog group is empty"))
1957 1965 revisions += len(fl) - o
1958 1966 files += 1
1959 1967
1960 1968 # make changelog see real files again
1961 1969 cl.finalize(trp)
1962 1970
1963 1971 newheads = len(self.changelog.heads())
1964 1972 heads = ""
1965 1973 if oldheads and newheads != oldheads:
1966 1974 heads = _(" (%+d heads)") % (newheads - oldheads)
1967 1975
1968 1976 self.ui.status(_("added %d changesets"
1969 1977 " with %d changes to %d files%s\n")
1970 1978 % (changesets, revisions, files, heads))
1971 1979
1972 1980 if changesets > 0:
1973 1981 self.hook('pretxnchangegroup', throw=True,
1974 1982 node=hex(self.changelog.node(cor+1)), source=srctype,
1975 1983 url=url)
1976 1984
1977 1985 tr.close()
1978 1986 finally:
1979 1987 del tr
1980 1988
1981 1989 if changesets > 0:
1982 1990 # forcefully update the on-disk branch cache
1983 1991 self.ui.debug(_("updating the branch cache\n"))
1984 1992 self.branchtags()
1985 1993 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1986 1994 source=srctype, url=url)
1987 1995
1988 1996 for i in xrange(cor + 1, cnr + 1):
1989 1997 self.hook("incoming", node=hex(self.changelog.node(i)),
1990 1998 source=srctype, url=url)
1991 1999
1992 2000 # never return 0 here:
1993 2001 if newheads < oldheads:
1994 2002 return newheads - oldheads - 1
1995 2003 else:
1996 2004 return newheads - oldheads + 1
1997 2005
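
The return value encodes the head-count change so callers can tell "nothing happened" (0) apart from "pulled with no new heads" (1), which is why the final branches never return 0. A pure-arithmetic model of that convention, mirroring the code above:

    def head_delta_code(oldheads, newheads):
        # mirrors the final branches of addchangegroup: never 0
        if newheads < oldheads:
            return newheads - oldheads - 1  # heads removed: -2..-n
        return newheads - oldheads + 1      # unchanged: 1, added: 2..n

    assert head_delta_code(3, 3) == 1   # same number of heads
    assert head_delta_code(1, 3) == 3   # two new heads -> 1 + 2
    assert head_delta_code(3, 1) == -3  # two heads gone -> -1 - 2
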
1998 2006
1999 2007 def stream_in(self, remote):
2000 2008 fp = remote.stream_out()
2001 2009 l = fp.readline()
2002 2010 try:
2003 2011 resp = int(l)
2004 2012 except ValueError:
2005 2013 raise util.UnexpectedOutput(
2006 2014 _('Unexpected response from remote server:'), l)
2007 2015 if resp == 1:
2008 2016 raise util.Abort(_('operation forbidden by server'))
2009 2017 elif resp == 2:
2010 2018 raise util.Abort(_('locking the remote repository failed'))
2011 2019 elif resp != 0:
2012 2020 raise util.Abort(_('the server sent an unknown error code'))
2013 2021 self.ui.status(_('streaming all changes\n'))
2014 2022 l = fp.readline()
2015 2023 try:
2016 2024 total_files, total_bytes = map(int, l.split(' ', 1))
2017 2025 except (ValueError, TypeError):
2018 2026 raise util.UnexpectedOutput(
2019 2027 _('Unexpected response from remote server:'), l)
2020 2028 self.ui.status(_('%d files to transfer, %s of data\n') %
2021 2029 (total_files, util.bytecount(total_bytes)))
2022 2030 start = time.time()
2023 2031 for i in xrange(total_files):
2024 2032 # XXX doesn't support '\n' or '\r' in filenames
2025 2033 l = fp.readline()
2026 2034 try:
2027 2035 name, size = l.split('\0', 1)
2028 2036 size = int(size)
2029 2037 except (ValueError, TypeError):
2030 2038 raise util.UnexpectedOutput(
2031 2039 _('Unexpected response from remote server:'), l)
2032 2040 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2033 2041 ofp = self.sopener(name, 'w')
2034 2042 for chunk in util.filechunkiter(fp, limit=size):
2035 2043 ofp.write(chunk)
2036 2044 ofp.close()
2037 2045 elapsed = time.time() - start
2038 2046 if elapsed <= 0:
2039 2047 elapsed = 0.001
2040 2048 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2041 2049 (util.bytecount(total_bytes), elapsed,
2042 2050 util.bytecount(total_bytes / elapsed)))
2043 2051 self.invalidate()
2044 2052 return len(self.heads()) + 1
2045 2053
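
After the one-line response code, the stream consists of a "total_files total_bytes" line, then one "name\0size" header per file followed by exactly size raw bytes of content. A self-contained sketch of that framing, fed from a handcrafted buffer rather than a real server (it ignores total_bytes, which stream_in only uses for reporting):

    import io

    def parse_stream(fp):
        files = {}
        total_files, total_bytes = map(int, fp.readline().split(b' ', 1))
        for _ in range(total_files):
            name, size = fp.readline().split(b'\0', 1)  # size still has '\n'
            files[name] = fp.read(int(size))            # int() strips it
        return files

    buf = io.BytesIO(b'2 11\nfoo\x005\nhelloa/b\x006\nworld!')
    print(parse_stream(buf))  # {b'foo': b'hello', b'a/b': b'world!'}
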
2046 2054 def clone(self, remote, heads=[], stream=False):
2047 2055 '''clone remote repository.
2048 2056
2049 2057 keyword arguments:
2050 2058 heads: list of revs to clone (forces use of pull)
2051 2059 stream: use streaming clone if possible'''
2052 2060
2053 2061 # now, all clients that can request uncompressed clones can
2054 2062 # read repo formats supported by all servers that can serve
2055 2063 # them.
2056 2064
2057 2065 # if revlog format changes, client will have to check version
2058 2066 # and format flags on "stream" capability, and use
2059 2067 # uncompressed only if compatible.
2060 2068
2061 2069 if stream and not heads and remote.capable('stream'):
2062 2070 return self.stream_in(remote)
2063 2071 return self.pull(remote, heads)
2064 2072
2065 2073 # used to avoid circular references so destructors work
2066 2074 def aftertrans(files):
2067 2075 renamefiles = [tuple(t) for t in files]
2068 2076 def a():
2069 2077 for src, dest in renamefiles:
2070 2078 util.rename(src, dest)
2071 2079 return a
2072 2080
2073 2081 def instance(ui, path, create):
2074 2082 return localrepository(ui, util.drop_scheme('file', path), create)
2075 2083
2076 2084 def islocal(path):
2077 2085 return True
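
As the comment above aftertrans says, the point is to avoid circular references: the rename list is copied into a plain closure, so a transaction can hold the callback without keeping a reference back to the repository, and destructors still run. A standalone sketch of the same pattern, with os.rename standing in for util.rename and made-up file names:

    import os

    def aftertrans_sketch(files):
        renamefiles = [tuple(t) for t in files]  # snapshot the list now
        def a():
            for src, dest in renamefiles:
                os.rename(src, dest)
        return a

    open('journal.demo', 'w').close()
    cb = aftertrans_sketch([('journal.demo', 'undo.demo')])
    cb()                                # performs the deferred renames
    print(os.path.exists('undo.demo'))  # True
    os.remove('undo.demo')
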
@@ -1,110 +1,126 b''
1 1 #!/bin/sh
2 2
3 3 echo % commit date test
4 4 hg init test
5 5 cd test
6 6 echo foo > foo
7 7 hg add foo
8 8 HGEDITOR=true hg commit -m ""
9 9 hg commit -d '0 0' -m commit-1
10 10 echo foo >> foo
11 11 hg commit -d '1 4444444' -m commit-3
12 12 hg commit -d '1 15.1' -m commit-4
13 13 hg commit -d 'foo bar' -m commit-5
14 14 hg commit -d ' 1 4444' -m commit-6
15 15 hg commit -d '111111111111 0' -m commit-7
16 16
17 17 echo % commit added file that has been deleted
18 18 echo bar > bar
19 19 hg add bar
20 20 rm bar
21 21 hg commit -d "1000000 0" -m commit-8
22 22 hg commit -d "1000000 0" -m commit-8-2 bar
23 23
24 24 hg -q revert -a --no-backup
25 25
26 26 mkdir dir
27 27 echo boo > dir/file
28 28 hg add
29 29 hg -v commit -d '0 0' -m commit-9 dir
30 30
31 31 echo > dir.file
32 32 hg add
33 33 hg commit -d '0 0' -m commit-10 dir dir.file
34 34
35 35 echo >> dir/file
36 36 mkdir bleh
37 37 mkdir dir2
38 38 cd bleh
39 39 hg commit -d '0 0' -m commit-11 .
40 40 hg commit -d '0 0' -m commit-12 ../dir ../dir2
41 41 hg -v commit -d '0 0' -m commit-13 ../dir
42 42 cd ..
43 43
44 44 hg commit -d '0 0' -m commit-14 does-not-exist
45 45 ln -s foo baz
46 46 hg commit -d '0 0' -m commit-15 baz
47 47 touch quux
48 48 hg commit -d '0 0' -m commit-16 quux
49 49 echo >> dir/file
50 50 hg -v commit -d '0 0' -m commit-17 dir/file
51 51 # An empty date was interpreted as epoch origin
52 52 echo foo >> foo
53 53 hg commit -d '' -m commit-no-date
54 54 hg tip --template '{date|isodate}\n' | grep '1970'
55 55 cd ..
56 56
57 57 echo % partial subdir commit test
58 58 hg init test2
59 59 cd test2
60 60 mkdir foo
61 61 echo foo > foo/foo
62 62 mkdir bar
63 63 echo bar > bar/bar
64 64 hg add
65 65 hg ci -d '1000000 0' -u test -m commit-subdir-1 foo
66 66 hg ci -d '1000001 0' -u test -m commit-subdir-2 bar
67 67 echo % subdir log 1
68 68 hg log -v foo
69 69 echo % subdir log 2
70 70 hg log -v bar
71 71 echo % full log
72 72 hg log -v
73 73 cd ..
74 74
75 75 echo % dot and subdir commit test
76 76 hg init test3
77 77 cd test3
78 78 mkdir foo
79 79 echo foo content > foo/plain-file
80 80 hg add foo/plain-file
81 81 hg ci -d '1000000 0' -u test -m commit-foo-subdir foo
82 82 echo modified foo content > foo/plain-file
83 83 hg ci -d '2000000 0' -u test -m commit-foo-dot .
84 84 echo % full log
85 85 hg log -v
86 86 echo % subdir log
87 87 cd foo
88 88 hg log .
89 89 cd ..
90 90 cd ..
91 91
92 92 cd ..
93 93 hg init issue1049
94 94 cd issue1049
95 95 echo a > a
96 96 hg ci -Ama
97 97 echo a >> a
98 98 hg ci -mb
99 99 hg up 0
100 100 echo b >> a
101 101 hg ci -mc
102 102 HGMERGE=true hg merge
103 103 echo % should fail because we are specifying a file name
104 104 hg ci -mmerge a
105 105 echo % should fail because we are specifying a pattern
106 106 hg ci -mmerge -I a
107 107 echo % should succeed
108 108 hg ci -mmerge
109 cd ..
110
111
112 echo % test commit message content
113 hg init commitmsg
114 cd commitmsg
115 echo changed > changed
116 echo removed > removed
117 hg ci -qAm init
118
119 hg rm removed
120 echo changed >> changed
121 echo added > added
122 hg add added
123 HGEDITOR=cat hg ci -A
124 cd ..
109 125
110 126 exit 0
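
The new test block added here checks the commit editor template: with HGEDITOR=cat, each file must appear under its real status, "added" included, as the expected output below confirms ("HG: added added", "HG: changed changed", "HG: removed removed"). A hedged sketch of producing those HG: lines from three status lists; the helper is illustrative, not Mercurial's internal API:

    def editor_status_lines(added, changed, removed):
        lines = []
        for verb, names in (('added', added),
                            ('changed', changed),
                            ('removed', removed)):
            lines.extend('HG: %s %s' % (verb, f) for f in names)
        return '\n'.join(lines)

    print(editor_status_lines(['added'], ['changed'], ['removed']))
    # HG: added added
    # HG: changed changed
    # HG: removed removed
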
@@ -1,108 +1,121 b''
1 1 % commit date test
2 2 transaction abort!
3 3 rollback completed
4 4 abort: empty commit message
5 5 abort: impossible time zone offset: 4444444
6 6 abort: invalid date: '1\t15.1'
7 7 abort: invalid date: 'foo bar'
8 8 abort: date exceeds 32 bits: 111111111111
9 9 % commit added file that has been deleted
10 10 nothing changed
11 11 abort: file bar not found!
12 12 adding dir/file
13 13 dir/file
14 14 committed changeset 2:d2a76177cb42
15 15 adding dir.file
16 16 abort: no match under directory dir!
17 17 abort: no match under directory .!
18 18 abort: no match under directory ../dir2!
19 19 dir/file
20 20 committed changeset 3:1cd62a2d8db5
21 21 does-not-exist: No such file or directory
22 22 abort: file does-not-exist not found!
23 23 abort: file baz not tracked!
24 24 abort: file quux not tracked!
25 25 dir/file
26 26 committed changeset 4:49176991390e
27 27 % partial subdir commit test
28 28 adding bar/bar
29 29 adding foo/foo
30 30 % subdir log 1
31 31 changeset: 0:6ef3cb06bb80
32 32 user: test
33 33 date: Mon Jan 12 13:46:40 1970 +0000
34 34 files: foo/foo
35 35 description:
36 36 commit-subdir-1
37 37
38 38
39 39 % subdir log 2
40 40 changeset: 1:f2e51572cf5a
41 41 tag: tip
42 42 user: test
43 43 date: Mon Jan 12 13:46:41 1970 +0000
44 44 files: bar/bar
45 45 description:
46 46 commit-subdir-2
47 47
48 48
49 49 % full log
50 50 changeset: 1:f2e51572cf5a
51 51 tag: tip
52 52 user: test
53 53 date: Mon Jan 12 13:46:41 1970 +0000
54 54 files: bar/bar
55 55 description:
56 56 commit-subdir-2
57 57
58 58
59 59 changeset: 0:6ef3cb06bb80
60 60 user: test
61 61 date: Mon Jan 12 13:46:40 1970 +0000
62 62 files: foo/foo
63 63 description:
64 64 commit-subdir-1
65 65
66 66
67 67 % dot and subdir commit test
68 68 % full log
69 69 changeset: 1:d9180e04fa8a
70 70 tag: tip
71 71 user: test
72 72 date: Sat Jan 24 03:33:20 1970 +0000
73 73 files: foo/plain-file
74 74 description:
75 75 commit-foo-dot
76 76
77 77
78 78 changeset: 0:80b572aaf098
79 79 user: test
80 80 date: Mon Jan 12 13:46:40 1970 +0000
81 81 files: foo/plain-file
82 82 description:
83 83 commit-foo-subdir
84 84
85 85
86 86 % subdir log
87 87 changeset: 1:d9180e04fa8a
88 88 tag: tip
89 89 user: test
90 90 date: Sat Jan 24 03:33:20 1970 +0000
91 91 summary: commit-foo-dot
92 92
93 93 changeset: 0:80b572aaf098
94 94 user: test
95 95 date: Mon Jan 12 13:46:40 1970 +0000
96 96 summary: commit-foo-subdir
97 97
98 98 adding a
99 99 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 100 created new head
101 101 merging a
102 102 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
103 103 (branch merge, don't forget to commit)
104 104 % should fail because we are specifying a file name
105 105 abort: cannot partially commit a merge (do not specify files or patterns)
106 106 % should fail because we are specifying a pattern
107 107 abort: cannot partially commit a merge (do not specify files or patterns)
108 108 % should succeed
109 % test commit message content
110
111
112 HG: Enter commit message. Lines beginning with 'HG:' are removed.
113 HG: --
114 HG: user: test
115 HG: branch 'default'
116 HG: added added
117 HG: changed changed
118 HG: removed removed
119 transaction abort!
120 rollback completed
121 abort: empty commit message