localrepo.commit: normalize commit message even for rawcommit
Alexis S. L. Carvalho
r6254:3667b6e4 default
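
This changeset hoists the commit-message normalization in commit() out of
the use_dirstate branch, so messages coming in through rawcommit() (which
passes explicit parents) are cleaned up the same way; only the
empty-message abort remains conditional on use_dirstate. A minimal sketch
of the resulting behavior, with ValueError standing in for util.Abort and
the helper name invented for illustration:

    def normalize(text, use_dirstate=True):
        # strip trailing whitespace per line, drop leading blank lines
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines and use_dirstate:
            raise ValueError("empty commit message")  # util.Abort upstream
        return '\n'.join(lines)

    # normalize("  \nfix bug   \n\ndetail  \n\n") == "fix bug\n\ndetail"
    # normalize("", use_dirstate=False) == ""   # rawcommit may stay empty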
@@ -1,2124 +1,2123 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __init__(self, parentui, path=None, create=0):
20 20 repo.repository.__init__(self)
21 21 self.root = os.path.realpath(path)
22 22 self.path = os.path.join(self.root, ".hg")
23 23 self.origroot = path
24 24 self.opener = util.opener(self.path)
25 25 self.wopener = util.opener(self.root)
26 26
27 27 if not os.path.isdir(self.path):
28 28 if create:
29 29 if not os.path.exists(path):
30 30 os.mkdir(path)
31 31 os.mkdir(self.path)
32 32 requirements = ["revlogv1"]
33 33 if parentui.configbool('format', 'usestore', True):
34 34 os.mkdir(os.path.join(self.path, "store"))
35 35 requirements.append("store")
36 36 # create an invalid changelog
37 37 self.opener("00changelog.i", "a").write(
38 38 '\0\0\0\2' # represents revlogv2
39 39 ' dummy changelog to prevent using the old repo layout'
40 40 )
41 41 reqfile = self.opener("requires", "w")
42 42 for r in requirements:
43 43 reqfile.write("%s\n" % r)
44 44 reqfile.close()
45 45 else:
46 46 raise repo.RepoError(_("repository %s not found") % path)
47 47 elif create:
48 48 raise repo.RepoError(_("repository %s already exists") % path)
49 49 else:
50 50 # find requirements
51 51 try:
52 52 requirements = self.opener("requires").read().splitlines()
53 53 except IOError, inst:
54 54 if inst.errno != errno.ENOENT:
55 55 raise
56 56 requirements = []
57 57 # check them
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 61
62 62 # setup store
63 63 if "store" in requirements:
64 64 self.encodefn = util.encodefilename
65 65 self.decodefn = util.decodefilename
66 66 self.spath = os.path.join(self.path, "store")
67 67 else:
68 68 self.encodefn = lambda x: x
69 69 self.decodefn = lambda x: x
70 70 self.spath = self.path
71 71
72 72 try:
73 73 # files in .hg/ will be created using this mode
74 74 mode = os.stat(self.spath).st_mode
75 75 # avoid some useless chmods
76 76 if (0777 & ~util._umask) == (0777 & mode):
77 77 mode = None
78 78 except OSError:
79 79 mode = None
80 80
81 81 self._createmode = mode
82 82 self.opener.createmode = mode
83 83 sopener = util.opener(self.spath)
84 84 sopener.createmode = mode
85 85 self.sopener = util.encodedopener(sopener, self.encodefn)
86 86
87 87 self.ui = ui.ui(parentui=parentui)
88 88 try:
89 89 self.ui.readconfig(self.join("hgrc"), self.root)
90 90 extensions.loadall(self.ui)
91 91 except IOError:
92 92 pass
93 93
94 94 self.tagscache = None
95 95 self._tagstypecache = None
96 96 self.branchcache = None
97 97 self._ubranchcache = None # UTF-8 version of branchcache
98 98 self._branchcachetip = None
99 99 self.nodetagscache = None
100 100 self.filterpats = {}
101 101 self._datafilters = {}
102 102 self._transref = self._lockref = self._wlockref = None
103 103
104 104 def __getattr__(self, name):
105 105 if name == 'changelog':
106 106 self.changelog = changelog.changelog(self.sopener)
107 107 self.sopener.defversion = self.changelog.version
108 108 return self.changelog
109 109 if name == 'manifest':
110 110 self.changelog
111 111 self.manifest = manifest.manifest(self.sopener)
112 112 return self.manifest
113 113 if name == 'dirstate':
114 114 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 115 return self.dirstate
116 116 else:
117 117 raise AttributeError, name
118 118
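A side note on the __getattr__ block above: it is only invoked when normal
attribute lookup fails, so assigning self.changelog on first access caches
the value and later reads bypass the hook entirely. The idiom in isolation
(names invented for illustration):

    class Lazy(object):
        def __getattr__(self, name):
            # only reached when normal lookup fails
            if name == 'expensive':
                self.expensive = sum(xrange(1000))  # assignment caches it
                return self.expensive
            raise AttributeError, name

    l = Lazy()
    l.expensive   # computed here; subsequent reads skip __getattr__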
119 119 def url(self):
120 120 return 'file:' + self.root
121 121
122 122 def hook(self, name, throw=False, **args):
123 123 return hook.hook(self.ui, self, name, throw, **args)
124 124
125 125 tag_disallowed = ':\r\n'
126 126
127 127 def _tag(self, name, node, message, local, user, date, parent=None,
128 128 extra={}):
129 129 use_dirstate = parent is None
130 130
131 131 for c in self.tag_disallowed:
132 132 if c in name:
133 133 raise util.Abort(_('%r cannot be used in a tag name') % c)
134 134
135 135 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
136 136
137 137 def writetag(fp, name, munge, prevtags):
138 138 fp.seek(0, 2)
139 139 if prevtags and prevtags[-1] != '\n':
140 140 fp.write('\n')
141 141 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
142 142 fp.close()
143 143
144 144 prevtags = ''
145 145 if local:
146 146 try:
147 147 fp = self.opener('localtags', 'r+')
148 148 except IOError, err:
149 149 fp = self.opener('localtags', 'a')
150 150 else:
151 151 prevtags = fp.read()
152 152
153 153 # local tags are stored in the current charset
154 154 writetag(fp, name, None, prevtags)
155 155 self.hook('tag', node=hex(node), tag=name, local=local)
156 156 return
157 157
158 158 if use_dirstate:
159 159 try:
160 160 fp = self.wfile('.hgtags', 'rb+')
161 161 except IOError, err:
162 162 fp = self.wfile('.hgtags', 'ab')
163 163 else:
164 164 prevtags = fp.read()
165 165 else:
166 166 try:
167 167 prevtags = self.filectx('.hgtags', parent).data()
168 168 except revlog.LookupError:
169 169 pass
170 170 fp = self.wfile('.hgtags', 'wb')
171 171 if prevtags:
172 172 fp.write(prevtags)
173 173
174 174 # committed tags are stored in UTF-8
175 175 writetag(fp, name, util.fromlocal, prevtags)
176 176
177 177 if use_dirstate and '.hgtags' not in self.dirstate:
178 178 self.add(['.hgtags'])
179 179
180 180 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
181 181 extra=extra)
182 182
183 183 self.hook('tag', node=hex(node), tag=name, local=local)
184 184
185 185 return tagnode
186 186
187 187 def tag(self, name, node, message, local, user, date):
188 188 '''tag a revision with a symbolic name.
189 189
190 190 if local is True, the tag is stored in a per-repository file.
191 191 otherwise, it is stored in the .hgtags file, and a new
192 192 changeset is committed with the change.
193 193
194 194 keyword arguments:
195 195
196 196 local: whether to store tag in non-version-controlled file
197 197 (default False)
198 198
199 199 message: commit message to use if committing
200 200
201 201 user: name of user to use if committing
202 202
203 203 date: date tuple to use if committing'''
204 204
205 205 for x in self.status()[:5]:
206 206 if '.hgtags' in x:
207 207 raise util.Abort(_('working copy of .hgtags is changed '
208 208 '(please commit .hgtags manually)'))
209 209
210 210 self._tag(name, node, message, local, user, date)
211 211
212 212 def tags(self):
213 213 '''return a mapping of tag to node'''
214 214 if self.tagscache:
215 215 return self.tagscache
216 216
217 217 globaltags = {}
218 218 tagtypes = {}
219 219
220 220 def readtags(lines, fn, tagtype):
221 221 filetags = {}
222 222 count = 0
223 223
224 224 def warn(msg):
225 225 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
226 226
227 227 for l in lines:
228 228 count += 1
229 229 if not l:
230 230 continue
231 231 s = l.split(" ", 1)
232 232 if len(s) != 2:
233 233 warn(_("cannot parse entry"))
234 234 continue
235 235 node, key = s
236 236 key = util.tolocal(key.strip()) # stored in UTF-8
237 237 try:
238 238 bin_n = bin(node)
239 239 except TypeError:
240 240 warn(_("node '%s' is not well formed") % node)
241 241 continue
242 242 if bin_n not in self.changelog.nodemap:
243 243 warn(_("tag '%s' refers to unknown node") % key)
244 244 continue
245 245
246 246 h = []
247 247 if key in filetags:
248 248 n, h = filetags[key]
249 249 h.append(n)
250 250 filetags[key] = (bin_n, h)
251 251
252 252 for k, nh in filetags.items():
253 253 if k not in globaltags:
254 254 globaltags[k] = nh
255 255 tagtypes[k] = tagtype
256 256 continue
257 257
258 258 # we prefer the global tag if:
259 259 # it supersedes us OR
260 260 # mutual supersedes and it has a higher rank
261 261 # otherwise we win because we're tip-most
262 262 an, ah = nh
263 263 bn, bh = globaltags[k]
264 264 if (bn != an and an in bh and
265 265 (bn not in ah or len(bh) > len(ah))):
266 266 an = bn
267 267 ah.extend([n for n in bh if n not in ah])
268 268 globaltags[k] = an, ah
269 269 tagtypes[k] = tagtype
270 270
271 271 # read the tags file from each head, ending with the tip
272 272 f = None
273 273 for rev, node, fnode in self._hgtagsnodes():
274 274 f = (f and f.filectx(fnode) or
275 275 self.filectx('.hgtags', fileid=fnode))
276 276 readtags(f.data().splitlines(), f, "global")
277 277
278 278 try:
279 279 data = util.fromlocal(self.opener("localtags").read())
280 280 # localtags are stored in the local character set
281 281 # while the internal tag table is stored in UTF-8
282 282 readtags(data.splitlines(), "localtags", "local")
283 283 except IOError:
284 284 pass
285 285
286 286 self.tagscache = {}
287 287 self._tagstypecache = {}
288 288 for k,nh in globaltags.items():
289 289 n = nh[0]
290 290 if n != nullid:
291 291 self.tagscache[k] = n
292 292 self._tagstypecache[k] = tagtypes[k]
293 293 self.tagscache['tip'] = self.changelog.tip()
294 294
295 295 return self.tagscache
296 296
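The supersession rule commented inside readtags() above is dense; restated
as a hedged standalone helper (the real code additionally merges the two
histories when the global entry wins):

    def pick(an, ah, bn, bh):
        # an/ah: node and history from the tip-most .hgtags copy
        # bn/bh: node and history already recorded in globaltags
        if bn != an and an in bh and (bn not in ah or len(bh) > len(ah)):
            return bn   # the global tag supersedes us, or outranks us
        return an       # otherwise tip-most wins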
297 297 def tagtype(self, tagname):
298 298 '''
299 299 return the type of the given tag. result can be:
300 300
301 301 'local' : a local tag
302 302 'global' : a global tag
303 303 None : tag does not exist
304 304 '''
305 305
306 306 self.tags()
307 307
308 308 return self._tagstypecache.get(tagname)
309 309
310 310 def _hgtagsnodes(self):
311 311 heads = self.heads()
312 312 heads.reverse()
313 313 last = {}
314 314 ret = []
315 315 for node in heads:
316 316 c = self.changectx(node)
317 317 rev = c.rev()
318 318 try:
319 319 fnode = c.filenode('.hgtags')
320 320 except revlog.LookupError:
321 321 continue
322 322 ret.append((rev, node, fnode))
323 323 if fnode in last:
324 324 ret[last[fnode]] = None
325 325 last[fnode] = len(ret) - 1
326 326 return [item for item in ret if item]
327 327
328 328 def tagslist(self):
329 329 '''return a list of tags ordered by revision'''
330 330 l = []
331 331 for t, n in self.tags().items():
332 332 try:
333 333 r = self.changelog.rev(n)
334 334 except:
335 335 r = -2 # sort to the beginning of the list if unknown
336 336 l.append((r, t, n))
337 337 l.sort()
338 338 return [(t, n) for r, t, n in l]
339 339
340 340 def nodetags(self, node):
341 341 '''return the tags associated with a node'''
342 342 if not self.nodetagscache:
343 343 self.nodetagscache = {}
344 344 for t, n in self.tags().items():
345 345 self.nodetagscache.setdefault(n, []).append(t)
346 346 return self.nodetagscache.get(node, [])
347 347
348 348 def _branchtags(self, partial, lrev):
349 349 tiprev = self.changelog.count() - 1
350 350 if lrev != tiprev:
351 351 self._updatebranchcache(partial, lrev+1, tiprev+1)
352 352 self._writebranchcache(partial, self.changelog.tip(), tiprev)
353 353
354 354 return partial
355 355
356 356 def branchtags(self):
357 357 tip = self.changelog.tip()
358 358 if self.branchcache is not None and self._branchcachetip == tip:
359 359 return self.branchcache
360 360
361 361 oldtip = self._branchcachetip
362 362 self._branchcachetip = tip
363 363 if self.branchcache is None:
364 364 self.branchcache = {} # avoid recursion in changectx
365 365 else:
366 366 self.branchcache.clear() # keep using the same dict
367 367 if oldtip is None or oldtip not in self.changelog.nodemap:
368 368 partial, last, lrev = self._readbranchcache()
369 369 else:
370 370 lrev = self.changelog.rev(oldtip)
371 371 partial = self._ubranchcache
372 372
373 373 self._branchtags(partial, lrev)
374 374
375 375 # the branch cache is stored on disk as UTF-8, but in the local
376 376 # charset internally
377 377 for k, v in partial.items():
378 378 self.branchcache[util.tolocal(k)] = v
379 379 self._ubranchcache = partial
380 380 return self.branchcache
381 381
382 382 def _readbranchcache(self):
383 383 partial = {}
384 384 try:
385 385 f = self.opener("branch.cache")
386 386 lines = f.read().split('\n')
387 387 f.close()
388 388 except (IOError, OSError):
389 389 return {}, nullid, nullrev
390 390
391 391 try:
392 392 last, lrev = lines.pop(0).split(" ", 1)
393 393 last, lrev = bin(last), int(lrev)
394 394 if not (lrev < self.changelog.count() and
395 395 self.changelog.node(lrev) == last): # sanity check
396 396 # invalidate the cache
397 397 raise ValueError('invalidating branch cache (tip differs)')
398 398 for l in lines:
399 399 if not l: continue
400 400 node, label = l.split(" ", 1)
401 401 partial[label.strip()] = bin(node)
402 402 except (KeyboardInterrupt, util.SignalInterrupt):
403 403 raise
404 404 except Exception, inst:
405 405 if self.ui.debugflag:
406 406 self.ui.warn(str(inst), '\n')
407 407 partial, last, lrev = {}, nullid, nullrev
408 408 return partial, last, lrev
409 409
410 410 def _writebranchcache(self, branches, tip, tiprev):
411 411 try:
412 412 f = self.opener("branch.cache", "w", atomictemp=True)
413 413 f.write("%s %s\n" % (hex(tip), tiprev))
414 414 for label, node in branches.iteritems():
415 415 f.write("%s %s\n" % (hex(node), label))
416 416 f.rename()
417 417 except (IOError, OSError):
418 418 pass
419 419
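For reference, the branch.cache file read and written above is
line-oriented: the first line is "<tip-hex> <tip-rev>" (used as the sanity
check in _readbranchcache), and every following line is "<node-hex>
<label>". Illustrative contents, with made-up shortened hashes:

    f1f0d8e4a3... 6254           # cache tip node and rev
    a21b9304cd... default        # one "<node> <label>" line per branch
    0be47799ef... stable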
420 420 def _updatebranchcache(self, partial, start, end):
421 421 for r in xrange(start, end):
422 422 c = self.changectx(r)
423 423 b = c.branch()
424 424 partial[b] = c.node()
425 425
426 426 def lookup(self, key):
427 427 if key == '.':
428 428 key, second = self.dirstate.parents()
429 429 if key == nullid:
430 430 raise repo.RepoError(_("no revision checked out"))
431 431 if second != nullid:
432 432 self.ui.warn(_("warning: working directory has two parents, "
433 433 "tag '.' uses the first\n"))
434 434 elif key == 'null':
435 435 return nullid
436 436 n = self.changelog._match(key)
437 437 if n:
438 438 return n
439 439 if key in self.tags():
440 440 return self.tags()[key]
441 441 if key in self.branchtags():
442 442 return self.branchtags()[key]
443 443 n = self.changelog._partialmatch(key)
444 444 if n:
445 445 return n
446 446 try:
447 447 if len(key) == 20:
448 448 key = hex(key)
449 449 except:
450 450 pass
451 451 raise repo.RepoError(_("unknown revision '%s'") % key)
452 452
453 453 def dev(self):
454 454 return os.lstat(self.path).st_dev
455 455
456 456 def local(self):
457 457 return True
458 458
459 459 def join(self, f):
460 460 return os.path.join(self.path, f)
461 461
462 462 def sjoin(self, f):
463 463 f = self.encodefn(f)
464 464 return os.path.join(self.spath, f)
465 465
466 466 def wjoin(self, f):
467 467 return os.path.join(self.root, f)
468 468
469 469 def file(self, f):
470 470 if f[0] == '/':
471 471 f = f[1:]
472 472 return filelog.filelog(self.sopener, f)
473 473
474 474 def changectx(self, changeid=None):
475 475 return context.changectx(self, changeid)
476 476
477 477 def workingctx(self):
478 478 return context.workingctx(self)
479 479
480 480 def parents(self, changeid=None):
481 481 '''
482 482 get list of changectxs for parents of changeid or working directory
483 483 '''
484 484 if changeid is None:
485 485 pl = self.dirstate.parents()
486 486 else:
487 487 n = self.changelog.lookup(changeid)
488 488 pl = self.changelog.parents(n)
489 489 if pl[1] == nullid:
490 490 return [self.changectx(pl[0])]
491 491 return [self.changectx(pl[0]), self.changectx(pl[1])]
492 492
493 493 def filectx(self, path, changeid=None, fileid=None):
494 494 """changeid can be a changeset revision, node, or tag.
495 495 fileid can be a file revision or node."""
496 496 return context.filectx(self, path, changeid, fileid)
497 497
498 498 def getcwd(self):
499 499 return self.dirstate.getcwd()
500 500
501 501 def pathto(self, f, cwd=None):
502 502 return self.dirstate.pathto(f, cwd)
503 503
504 504 def wfile(self, f, mode='r'):
505 505 return self.wopener(f, mode)
506 506
507 507 def _link(self, f):
508 508 return os.path.islink(self.wjoin(f))
509 509
510 510 def _filter(self, filter, filename, data):
511 511 if filter not in self.filterpats:
512 512 l = []
513 513 for pat, cmd in self.ui.configitems(filter):
514 514 mf = util.matcher(self.root, "", [pat], [], [])[1]
515 515 fn = None
516 516 params = cmd
517 517 for name, filterfn in self._datafilters.iteritems():
518 518 if cmd.startswith(name):
519 519 fn = filterfn
520 520 params = cmd[len(name):].lstrip()
521 521 break
522 522 if not fn:
523 523 fn = lambda s, c, **kwargs: util.filter(s, c)
524 524 # Wrap old filters not supporting keyword arguments
525 525 if not inspect.getargspec(fn)[2]:
526 526 oldfn = fn
527 527 fn = lambda s, c, **kwargs: oldfn(s, c)
528 528 l.append((mf, fn, params))
529 529 self.filterpats[filter] = l
530 530
531 531 for mf, fn, cmd in self.filterpats[filter]:
532 532 if mf(filename):
533 533 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
534 534 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
535 535 break
536 536
537 537 return data
538 538
539 539 def adddatafilter(self, name, filter):
540 540 self._datafilters[name] = filter
541 541
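adddatafilter() registers a named filter that _filter() above selects
whenever an [encode]/[decode] command starts with that name; extensions
register filters this way. A hypothetical registration (the "crlf:" name
and hgrc snippet are invented for illustration):

    def crlf(s, params, ui=None, repo=None, filename=None, **kwargs):
        # decode filter: LF -> CRLF when writing to the working dir
        return s.replace('\n', '\r\n')
    repo.adddatafilter('crlf:', crlf)

    # matching hgrc, mapping a file pattern to the named filter:
    #   [decode]
    #   **.txt = crlf: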
542 542 def wread(self, filename):
543 543 if self._link(filename):
544 544 data = os.readlink(self.wjoin(filename))
545 545 else:
546 546 data = self.wopener(filename, 'r').read()
547 547 return self._filter("encode", filename, data)
548 548
549 549 def wwrite(self, filename, data, flags):
550 550 data = self._filter("decode", filename, data)
551 551 try:
552 552 os.unlink(self.wjoin(filename))
553 553 except OSError:
554 554 pass
555 555 self.wopener(filename, 'w').write(data)
556 556 util.set_flags(self.wjoin(filename), flags)
557 557
558 558 def wwritedata(self, filename, data):
559 559 return self._filter("decode", filename, data)
560 560
561 561 def transaction(self):
562 562 if self._transref and self._transref():
563 563 return self._transref().nest()
564 564
565 565 # abort here if the journal already exists
566 566 if os.path.exists(self.sjoin("journal")):
567 567 raise repo.RepoError(_("journal already exists - run hg recover"))
568 568
569 569 # save dirstate for rollback
570 570 try:
571 571 ds = self.opener("dirstate").read()
572 572 except IOError:
573 573 ds = ""
574 574 self.opener("journal.dirstate", "w").write(ds)
575 575 self.opener("journal.branch", "w").write(self.dirstate.branch())
576 576
577 577 renames = [(self.sjoin("journal"), self.sjoin("undo")),
578 578 (self.join("journal.dirstate"), self.join("undo.dirstate")),
579 579 (self.join("journal.branch"), self.join("undo.branch"))]
580 580 tr = transaction.transaction(self.ui.warn, self.sopener,
581 581 self.sjoin("journal"),
582 582 aftertrans(renames),
583 583 self._createmode)
584 584 self._transref = weakref.ref(tr)
585 585 return tr
586 586
587 587 def recover(self):
588 588 l = self.lock()
589 589 try:
590 590 if os.path.exists(self.sjoin("journal")):
591 591 self.ui.status(_("rolling back interrupted transaction\n"))
592 592 transaction.rollback(self.sopener, self.sjoin("journal"))
593 593 self.invalidate()
594 594 return True
595 595 else:
596 596 self.ui.warn(_("no interrupted transaction available\n"))
597 597 return False
598 598 finally:
599 599 del l
600 600
601 601 def rollback(self):
602 602 wlock = lock = None
603 603 try:
604 604 wlock = self.wlock()
605 605 lock = self.lock()
606 606 if os.path.exists(self.sjoin("undo")):
607 607 self.ui.status(_("rolling back last transaction\n"))
608 608 transaction.rollback(self.sopener, self.sjoin("undo"))
609 609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
610 610 try:
611 611 branch = self.opener("undo.branch").read()
612 612 self.dirstate.setbranch(branch)
613 613 except IOError:
614 614 self.ui.warn(_("Named branch could not be reset, "
615 615 "current branch still is: %s\n")
616 616 % util.tolocal(self.dirstate.branch()))
617 617 self.invalidate()
618 618 self.dirstate.invalidate()
619 619 else:
620 620 self.ui.warn(_("no rollback information available\n"))
621 621 finally:
622 622 del lock, wlock
623 623
624 624 def invalidate(self):
625 625 for a in "changelog manifest".split():
626 626 if hasattr(self, a):
627 627 self.__delattr__(a)
628 628 self.tagscache = None
629 629 self._tagstypecache = None
630 630 self.nodetagscache = None
631 631 self.branchcache = None
632 632 self._ubranchcache = None
633 633 self._branchcachetip = None
634 634
635 635 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
636 636 try:
637 637 l = lock.lock(lockname, 0, releasefn, desc=desc)
638 638 except lock.LockHeld, inst:
639 639 if not wait:
640 640 raise
641 641 self.ui.warn(_("waiting for lock on %s held by %r\n") %
642 642 (desc, inst.locker))
643 643 # default to 600 seconds timeout
644 644 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
645 645 releasefn, desc=desc)
646 646 if acquirefn:
647 647 acquirefn()
648 648 return l
649 649
650 650 def lock(self, wait=True):
651 651 if self._lockref and self._lockref():
652 652 return self._lockref()
653 653
654 654 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
655 655 _('repository %s') % self.origroot)
656 656 self._lockref = weakref.ref(l)
657 657 return l
658 658
659 659 def wlock(self, wait=True):
660 660 if self._wlockref and self._wlockref():
661 661 return self._wlockref()
662 662
663 663 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
664 664 self.dirstate.invalidate, _('working directory of %s') %
665 665 self.origroot)
666 666 self._wlockref = weakref.ref(l)
667 667 return l
668 668
669 669 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
670 670 """
671 671 commit an individual file as part of a larger transaction
672 672 """
673 673
674 674 t = self.wread(fn)
675 675 fl = self.file(fn)
676 676 fp1 = manifest1.get(fn, nullid)
677 677 fp2 = manifest2.get(fn, nullid)
678 678
679 679 meta = {}
680 680 cp = self.dirstate.copied(fn)
681 681 if cp:
682 682 # Mark the new revision of this file as a copy of another
683 683 # file. This copy data will effectively act as a parent
684 684 # of this new revision. If this is a merge, the first
685 685 # parent will be the nullid (meaning "look up the copy data")
686 686 # and the second one will be the other parent. For example:
687 687 #
688 688 # 0 --- 1 --- 3 rev1 changes file foo
689 689 # \ / rev2 renames foo to bar and changes it
690 690 # \- 2 -/ rev3 should have bar with all changes and
691 691 # should record that bar descends from
692 692 # bar in rev2 and foo in rev1
693 693 #
694 694 # this allows this merge to succeed:
695 695 #
696 696 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
697 697 # \ / merging rev3 and rev4 should use bar@rev2
698 698 # \- 2 --- 4 as the merge base
699 699 #
700 700 meta["copy"] = cp
701 701 if not manifest2: # not a branch merge
702 702 meta["copyrev"] = hex(manifest1.get(cp, nullid))
703 703 fp2 = nullid
704 704 elif fp2 != nullid: # copied on remote side
705 705 meta["copyrev"] = hex(manifest1.get(cp, nullid))
706 706 elif fp1 != nullid: # copied on local side, reversed
707 707 meta["copyrev"] = hex(manifest2.get(cp))
708 708 fp2 = fp1
709 709 elif cp in manifest2: # directory rename on local side
710 710 meta["copyrev"] = hex(manifest2[cp])
711 711 else: # directory rename on remote side
712 712 meta["copyrev"] = hex(manifest1.get(cp, nullid))
713 713 self.ui.debug(_(" %s: copy %s:%s\n") %
714 714 (fn, cp, meta["copyrev"]))
715 715 fp1 = nullid
716 716 elif fp2 != nullid:
717 717 # is one parent an ancestor of the other?
718 718 fpa = fl.ancestor(fp1, fp2)
719 719 if fpa == fp1:
720 720 fp1, fp2 = fp2, nullid
721 721 elif fpa == fp2:
722 722 fp2 = nullid
723 723
724 724 # is the file unmodified from the parent? report existing entry
725 725 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
726 726 return fp1
727 727
728 728 changelist.append(fn)
729 729 return fl.add(t, meta, tr, linkrev, fp1, fp2)
730 730
731 731 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
732 732 if p1 is None:
733 733 p1, p2 = self.dirstate.parents()
734 734 return self.commit(files=files, text=text, user=user, date=date,
735 735 p1=p1, p2=p2, extra=extra, empty_ok=True)
736 736
737 737 def commit(self, files=None, text="", user=None, date=None,
738 738 match=util.always, force=False, force_editor=False,
739 739 p1=None, p2=None, extra={}, empty_ok=False):
740 740 wlock = lock = tr = None
741 741 valid = 0 # don't save the dirstate if this isn't set
742 742 if files:
743 743 files = util.unique(files)
744 744 try:
745 745 commit = []
746 746 remove = []
747 747 changed = []
748 748 use_dirstate = (p1 is None) # not rawcommit
749 749 extra = extra.copy()
750 750
751 751 if use_dirstate:
752 752 if files:
753 753 for f in files:
754 754 s = self.dirstate[f]
755 755 if s in 'nma':
756 756 commit.append(f)
757 757 elif s == 'r':
758 758 remove.append(f)
759 759 else:
760 760 self.ui.warn(_("%s not tracked!\n") % f)
761 761 else:
762 762 changes = self.status(match=match)[:5]
763 763 modified, added, removed, deleted, unknown = changes
764 764 commit = modified + added
765 765 remove = removed
766 766 else:
767 767 commit = files
768 768
769 769 if use_dirstate:
770 770 p1, p2 = self.dirstate.parents()
771 771 update_dirstate = True
772 772 else:
773 773 p1, p2 = p1, p2 or nullid
774 774 update_dirstate = (self.dirstate.parents()[0] == p1)
775 775
776 776 c1 = self.changelog.read(p1)
777 777 c2 = self.changelog.read(p2)
778 778 m1 = self.manifest.read(c1[0]).copy()
779 779 m2 = self.manifest.read(c2[0])
780 780
781 781 if use_dirstate:
782 782 branchname = self.workingctx().branch()
783 783 try:
784 784 branchname = branchname.decode('UTF-8').encode('UTF-8')
785 785 except UnicodeDecodeError:
786 786 raise util.Abort(_('branch name not in UTF-8!'))
787 787 else:
788 788 branchname = ""
789 789
790 790 if use_dirstate:
791 791 oldname = c1[5].get("branch") # stored in UTF-8
792 792 if (not commit and not remove and not force and p2 == nullid
793 793 and branchname == oldname):
794 794 self.ui.status(_("nothing changed\n"))
795 795 return None
796 796
797 797 xp1 = hex(p1)
798 798 if p2 == nullid: xp2 = ''
799 799 else: xp2 = hex(p2)
800 800
801 801 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
802 802
803 803 wlock = self.wlock()
804 804 lock = self.lock()
805 805 tr = self.transaction()
806 806 trp = weakref.proxy(tr)
807 807
808 808 # check in files
809 809 new = {}
810 810 linkrev = self.changelog.count()
811 811 commit.sort()
812 812 is_exec = util.execfunc(self.root, m1.execf)
813 813 is_link = util.linkfunc(self.root, m1.linkf)
814 814 for f in commit:
815 815 self.ui.note(f + "\n")
816 816 try:
817 817 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
818 818 new_exec = is_exec(f)
819 819 new_link = is_link(f)
820 820 if ((not changed or changed[-1] != f) and
821 821 m2.get(f) != new[f]):
822 822 # mention the file in the changelog if some
823 823 # flag changed, even if there was no content
824 824 # change.
825 825 old_exec = m1.execf(f)
826 826 old_link = m1.linkf(f)
827 827 if old_exec != new_exec or old_link != new_link:
828 828 changed.append(f)
829 829 m1.set(f, new_exec, new_link)
830 830 if use_dirstate:
831 831 self.dirstate.normal(f)
832 832
833 833 except (OSError, IOError):
834 834 if use_dirstate:
835 835 self.ui.warn(_("trouble committing %s!\n") % f)
836 836 raise
837 837 else:
838 838 remove.append(f)
839 839
840 840 # update manifest
841 841 m1.update(new)
842 842 remove.sort()
843 843 removed = []
844 844
845 845 for f in remove:
846 846 if f in m1:
847 847 del m1[f]
848 848 removed.append(f)
849 849 elif f in m2:
850 850 removed.append(f)
851 851 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
852 852 (new, removed))
853 853
854 854 # add changeset
855 855 new = new.keys()
856 856 new.sort()
857 857
858 858 user = user or self.ui.username()
859 859 if (not empty_ok and not text) or force_editor:
860 860 edittext = []
861 861 if text:
862 862 edittext.append(text)
863 863 edittext.append("")
864 864 edittext.append(_("HG: Enter commit message."
865 865 " Lines beginning with 'HG:' are removed."))
866 866 edittext.append("HG: --")
867 867 edittext.append("HG: user: %s" % user)
868 868 if p2 != nullid:
869 869 edittext.append("HG: branch merge")
870 870 if branchname:
871 871 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
872 872 edittext.extend(["HG: changed %s" % f for f in changed])
873 873 edittext.extend(["HG: removed %s" % f for f in removed])
874 874 if not changed and not remove:
875 875 edittext.append("HG: no files changed")
876 876 edittext.append("")
877 877 # run editor in the repository root
878 878 olddir = os.getcwd()
879 879 os.chdir(self.root)
880 880 text = self.ui.edit("\n".join(edittext), user)
881 881 os.chdir(olddir)
882 882
883 883 if branchname:
884 884 extra["branch"] = branchname
885 885
886 if use_dirstate:
887 lines = [line.rstrip() for line in text.rstrip().splitlines()]
888 while lines and not lines[0]:
889 del lines[0]
890 if not lines:
891 raise util.Abort(_("empty commit message"))
892 text = '\n'.join(lines)
886 lines = [line.rstrip() for line in text.rstrip().splitlines()]
887 while lines and not lines[0]:
888 del lines[0]
889 if not lines and use_dirstate:
890 raise util.Abort(_("empty commit message"))
891 text = '\n'.join(lines)
893 892
894 893 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
895 894 user, date, extra)
896 895 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
897 896 parent2=xp2)
898 897 tr.close()
899 898
900 899 if self.branchcache:
901 900 self.branchtags()
902 901
903 902 if use_dirstate or update_dirstate:
904 903 self.dirstate.setparents(n)
905 904 if use_dirstate:
906 905 for f in removed:
907 906 self.dirstate.forget(f)
908 907 valid = 1 # our dirstate updates are complete
909 908
910 909 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
911 910 return n
912 911 finally:
913 912 if not valid: # don't save our updated dirstate
914 913 self.dirstate.invalidate()
915 914 del tr, lock, wlock
916 915
917 916 def walk(self, node=None, files=[], match=util.always, badmatch=None):
918 917 '''
919 918 walk recursively through the directory tree or a given
920 919 changeset, finding all files matched by the match
921 920 function
922 921
923 922 results are yielded in a tuple (src, filename), where src
924 923 is one of:
925 924 'f' the file was found in the directory tree
926 925 'm' the file was only in the dirstate and not in the tree
927 926 'b' file was not found and matched badmatch
928 927 '''
929 928
930 929 if node:
931 930 fdict = dict.fromkeys(files)
932 931 # for dirstate.walk, files=['.'] means "walk the whole tree".
933 932 # follow that here, too
934 933 fdict.pop('.', None)
935 934 mdict = self.manifest.read(self.changelog.read(node)[0])
936 935 mfiles = mdict.keys()
937 936 mfiles.sort()
938 937 for fn in mfiles:
939 938 for ffn in fdict:
940 939 # match if the file is the exact name or a directory
941 940 if ffn == fn or fn.startswith("%s/" % ffn):
942 941 del fdict[ffn]
943 942 break
944 943 if match(fn):
945 944 yield 'm', fn
946 945 ffiles = fdict.keys()
947 946 ffiles.sort()
948 947 for fn in ffiles:
949 948 if badmatch and badmatch(fn):
950 949 if match(fn):
951 950 yield 'b', fn
952 951 else:
953 952 self.ui.warn(_('%s: No such file in rev %s\n')
954 953 % (self.pathto(fn), short(node)))
955 954 else:
956 955 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
957 956 yield src, fn
958 957
959 958 def status(self, node1=None, node2=None, files=[], match=util.always,
960 959 list_ignored=False, list_clean=False, list_unknown=True):
961 960 """return status of files between two nodes or node and working directory
962 961
963 962 If node1 is None, use the first dirstate parent instead.
964 963 If node2 is None, compare node1 with working directory.
965 964 """
966 965
967 966 def fcmp(fn, getnode):
968 967 t1 = self.wread(fn)
969 968 return self.file(fn).cmp(getnode(fn), t1)
970 969
971 970 def mfmatches(node):
972 971 change = self.changelog.read(node)
973 972 mf = self.manifest.read(change[0]).copy()
974 973 for fn in mf.keys():
975 974 if not match(fn):
976 975 del mf[fn]
977 976 return mf
978 977
979 978 modified, added, removed, deleted, unknown = [], [], [], [], []
980 979 ignored, clean = [], []
981 980
982 981 compareworking = False
983 982 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
984 983 compareworking = True
985 984
986 985 if not compareworking:
987 986 # read the manifest from node1 before the manifest from node2,
988 987 # so that we'll hit the manifest cache if we're going through
989 988 # all the revisions in parent->child order.
990 989 mf1 = mfmatches(node1)
991 990
992 991 # are we comparing the working directory?
993 992 if not node2:
994 993 (lookup, modified, added, removed, deleted, unknown,
995 994 ignored, clean) = self.dirstate.status(files, match,
996 995 list_ignored, list_clean,
997 996 list_unknown)
998 997
999 998 # are we comparing working dir against its parent?
1000 999 if compareworking:
1001 1000 if lookup:
1002 1001 fixup = []
1003 1002 # do a full compare of any files that might have changed
1004 1003 ctx = self.changectx()
1005 1004 mexec = lambda f: 'x' in ctx.fileflags(f)
1006 1005 mlink = lambda f: 'l' in ctx.fileflags(f)
1007 1006 is_exec = util.execfunc(self.root, mexec)
1008 1007 is_link = util.linkfunc(self.root, mlink)
1009 1008 def flags(f):
1010 1009 return is_link(f) and 'l' or is_exec(f) and 'x' or ''
1011 1010 for f in lookup:
1012 1011 if (f not in ctx or flags(f) != ctx.fileflags(f)
1013 1012 or ctx[f].cmp(self.wread(f))):
1014 1013 modified.append(f)
1015 1014 else:
1016 1015 fixup.append(f)
1017 1016 if list_clean:
1018 1017 clean.append(f)
1019 1018
1020 1019 # update dirstate for files that are actually clean
1021 1020 if fixup:
1022 1021 wlock = None
1023 1022 try:
1024 1023 try:
1025 1024 wlock = self.wlock(False)
1026 1025 except lock.LockException:
1027 1026 pass
1028 1027 if wlock:
1029 1028 for f in fixup:
1030 1029 self.dirstate.normal(f)
1031 1030 finally:
1032 1031 del wlock
1033 1032 else:
1034 1033 # we are comparing working dir against non-parent
1035 1034 # generate a pseudo-manifest for the working dir
1036 1035 # XXX: create it in dirstate.py ?
1037 1036 mf2 = mfmatches(self.dirstate.parents()[0])
1038 1037 is_exec = util.execfunc(self.root, mf2.execf)
1039 1038 is_link = util.linkfunc(self.root, mf2.linkf)
1040 1039 for f in lookup + modified + added:
1041 1040 mf2[f] = ""
1042 1041 mf2.set(f, is_exec(f), is_link(f))
1043 1042 for f in removed:
1044 1043 if f in mf2:
1045 1044 del mf2[f]
1046 1045
1047 1046 else:
1048 1047 # we are comparing two revisions
1049 1048 mf2 = mfmatches(node2)
1050 1049
1051 1050 if not compareworking:
1052 1051 # flush lists from dirstate before comparing manifests
1053 1052 modified, added, clean = [], [], []
1054 1053
1055 1054 # make sure to sort the files so we talk to the disk in a
1056 1055 # reasonable order
1057 1056 mf2keys = mf2.keys()
1058 1057 mf2keys.sort()
1059 1058 getnode = lambda fn: mf1.get(fn, nullid)
1060 1059 for fn in mf2keys:
1061 1060 if fn in mf1:
1062 1061 if (mf1.flags(fn) != mf2.flags(fn) or
1063 1062 (mf1[fn] != mf2[fn] and
1064 1063 (mf2[fn] != "" or fcmp(fn, getnode)))):
1065 1064 modified.append(fn)
1066 1065 elif list_clean:
1067 1066 clean.append(fn)
1068 1067 del mf1[fn]
1069 1068 else:
1070 1069 added.append(fn)
1071 1070
1072 1071 removed = mf1.keys()
1073 1072
1074 1073 # sort and return results:
1075 1074 for l in modified, added, removed, deleted, unknown, ignored, clean:
1076 1075 l.sort()
1077 1076 return (modified, added, removed, deleted, unknown, ignored, clean)
1078 1077
1079 1078 def add(self, list):
1080 1079 wlock = self.wlock()
1081 1080 try:
1082 1081 rejected = []
1083 1082 for f in list:
1084 1083 p = self.wjoin(f)
1085 1084 try:
1086 1085 st = os.lstat(p)
1087 1086 except:
1088 1087 self.ui.warn(_("%s does not exist!\n") % f)
1089 1088 rejected.append(f)
1090 1089 continue
1091 1090 if st.st_size > 10000000:
1092 1091 self.ui.warn(_("%s: files over 10MB may cause memory and"
1093 1092 " performance problems\n"
1094 1093 "(use 'hg revert %s' to unadd the file)\n")
1095 1094 % (f, f))
1096 1095 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1097 1096 self.ui.warn(_("%s not added: only files and symlinks "
1098 1097 "supported currently\n") % f)
1099 1098 rejected.append(p)
1100 1099 elif self.dirstate[f] in 'amn':
1101 1100 self.ui.warn(_("%s already tracked!\n") % f)
1102 1101 elif self.dirstate[f] == 'r':
1103 1102 self.dirstate.normallookup(f)
1104 1103 else:
1105 1104 self.dirstate.add(f)
1106 1105 return rejected
1107 1106 finally:
1108 1107 del wlock
1109 1108
1110 1109 def forget(self, list):
1111 1110 wlock = self.wlock()
1112 1111 try:
1113 1112 for f in list:
1114 1113 if self.dirstate[f] != 'a':
1115 1114 self.ui.warn(_("%s not added!\n") % f)
1116 1115 else:
1117 1116 self.dirstate.forget(f)
1118 1117 finally:
1119 1118 del wlock
1120 1119
1121 1120 def remove(self, list, unlink=False):
1122 1121 wlock = None
1123 1122 try:
1124 1123 if unlink:
1125 1124 for f in list:
1126 1125 try:
1127 1126 util.unlink(self.wjoin(f))
1128 1127 except OSError, inst:
1129 1128 if inst.errno != errno.ENOENT:
1130 1129 raise
1131 1130 wlock = self.wlock()
1132 1131 for f in list:
1133 1132 if unlink and os.path.exists(self.wjoin(f)):
1134 1133 self.ui.warn(_("%s still exists!\n") % f)
1135 1134 elif self.dirstate[f] == 'a':
1136 1135 self.dirstate.forget(f)
1137 1136 elif f not in self.dirstate:
1138 1137 self.ui.warn(_("%s not tracked!\n") % f)
1139 1138 else:
1140 1139 self.dirstate.remove(f)
1141 1140 finally:
1142 1141 del wlock
1143 1142
1144 1143 def undelete(self, list):
1145 1144 wlock = None
1146 1145 try:
1147 1146 manifests = [self.manifest.read(self.changelog.read(p)[0])
1148 1147 for p in self.dirstate.parents() if p != nullid]
1149 1148 wlock = self.wlock()
1150 1149 for f in list:
1151 1150 if self.dirstate[f] != 'r':
1152 1151 self.ui.warn("%s not removed!\n" % f)
1153 1152 else:
1154 1153 m = f in manifests[0] and manifests[0] or manifests[1]
1155 1154 t = self.file(f).read(m[f])
1156 1155 self.wwrite(f, t, m.flags(f))
1157 1156 self.dirstate.normal(f)
1158 1157 finally:
1159 1158 del wlock
1160 1159
1161 1160 def copy(self, source, dest):
1162 1161 wlock = None
1163 1162 try:
1164 1163 p = self.wjoin(dest)
1165 1164 if not (os.path.exists(p) or os.path.islink(p)):
1166 1165 self.ui.warn(_("%s does not exist!\n") % dest)
1167 1166 elif not (os.path.isfile(p) or os.path.islink(p)):
1168 1167 self.ui.warn(_("copy failed: %s is not a file or a "
1169 1168 "symbolic link\n") % dest)
1170 1169 else:
1171 1170 wlock = self.wlock()
1172 1171 if dest not in self.dirstate:
1173 1172 self.dirstate.add(dest)
1174 1173 self.dirstate.copy(source, dest)
1175 1174 finally:
1176 1175 del wlock
1177 1176
1178 1177 def heads(self, start=None):
1179 1178 heads = self.changelog.heads(start)
1180 1179 # sort the output in rev descending order
1181 1180 heads = [(-self.changelog.rev(h), h) for h in heads]
1182 1181 heads.sort()
1183 1182 return [n for (r, n) in heads]
1184 1183
1185 1184 def branchheads(self, branch, start=None):
1186 1185 branches = self.branchtags()
1187 1186 if branch not in branches:
1188 1187 return []
1189 1188 # The basic algorithm is this:
1190 1189 #
1191 1190 # Start from the branch tip since there are no later revisions that can
1192 1191 # possibly be in this branch, and the tip is a guaranteed head.
1193 1192 #
1194 1193 # Remember the tip's parents as the first ancestors, since these by
1195 1194 # definition are not heads.
1196 1195 #
1197 1196 # Step backwards from the branch tip through all the revisions. We are
1198 1197 # guaranteed by the rules of Mercurial that we will now be visiting the
1199 1198 # nodes in reverse topological order (children before parents).
1200 1199 #
1201 1200 # If a revision is one of the ancestors of a head then we can toss it
1202 1201 # out of the ancestors set (we've already found it and won't be
1203 1202 # visiting it again) and put its parents in the ancestors set.
1204 1203 #
1205 1204 # Otherwise, if a revision is in the branch it's another head, since it
1206 1205 # wasn't in the ancestor list of an existing head. So add it to the
1207 1206 # head list, and add its parents to the ancestor list.
1208 1207 #
1209 1208 # If it is not in the branch ignore it.
1210 1209 #
1211 1210 # Once we have a list of heads, use nodesbetween to filter out all the
1212 1211 # heads that cannot be reached from startrev. There may be a more
1213 1212 # efficient way to do this as part of the previous algorithm.
1214 1213
1215 1214 set = util.set
1216 1215 heads = [self.changelog.rev(branches[branch])]
1217 1216 # Don't care if ancestors contains nullrev or not.
1218 1217 ancestors = set(self.changelog.parentrevs(heads[0]))
1219 1218 for rev in xrange(heads[0] - 1, nullrev, -1):
1220 1219 if rev in ancestors:
1221 1220 ancestors.update(self.changelog.parentrevs(rev))
1222 1221 ancestors.remove(rev)
1223 1222 elif self.changectx(rev).branch() == branch:
1224 1223 heads.append(rev)
1225 1224 ancestors.update(self.changelog.parentrevs(rev))
1226 1225 heads = [self.changelog.node(rev) for rev in heads]
1227 1226 if start is not None:
1228 1227 heads = self.changelog.nodesbetween([start], heads)[2]
1229 1228 return heads
1230 1229
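The long comment in branchheads() above describes a reverse-topological
sweep; a toy version over a made-up rev graph shows the mechanics
(a parent of -1 means null):

    # rev -> ((p1, p2), branch); rev numbers are topologically sorted
    graph = {4: ((3, -1), 'b'), 3: ((1, -1), 'a'),
             2: ((1, -1), 'b'), 1: ((0, -1), 'b'), 0: ((-1, -1), 'b')}

    def toy_branchheads(tip, branch):
        heads = [tip]
        ancestors = set(graph[tip][0])
        for rev in xrange(tip - 1, -1, -1):
            if rev in ancestors:            # known non-head: cross it off
                ancestors.update(graph[rev][0])
                ancestors.discard(rev)
            elif graph[rev][1] == branch:   # on-branch, not an ancestor
                heads.append(rev)
                ancestors.update(graph[rev][0])
        return heads

    # toy_branchheads(4, 'b') -> [4, 2]; rev 1 is an ancestor, not a head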
1231 1230 def branches(self, nodes):
1232 1231 if not nodes:
1233 1232 nodes = [self.changelog.tip()]
1234 1233 b = []
1235 1234 for n in nodes:
1236 1235 t = n
1237 1236 while 1:
1238 1237 p = self.changelog.parents(n)
1239 1238 if p[1] != nullid or p[0] == nullid:
1240 1239 b.append((t, n, p[0], p[1]))
1241 1240 break
1242 1241 n = p[0]
1243 1242 return b
1244 1243
1245 1244 def between(self, pairs):
1246 1245 r = []
1247 1246
1248 1247 for top, bottom in pairs:
1249 1248 n, l, i = top, [], 0
1250 1249 f = 1
1251 1250
1252 1251 while n != bottom:
1253 1252 p = self.changelog.parents(n)[0]
1254 1253 if i == f:
1255 1254 l.append(n)
1256 1255 f = f * 2
1257 1256 n = p
1258 1257 i += 1
1259 1258
1260 1259 r.append(l)
1261 1260
1262 1261 return r
1263 1262
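between() walks first parents from top toward bottom and records only the
nodes at exponentially growing distances; this sparse sampling is what lets
the discovery code in findincoming() binary-search a long branch with few
round trips. The sampling pattern isolated (hypothetical helper):

    def sampled(distance):
        # indices kept by the i == f / f *= 2 loop in between()
        i, f, out = 0, 1, []
        while i < distance:
            if i == f:
                out.append(i)
                f *= 2
            i += 1
        return out

    # sampled(20) -> [1, 2, 4, 8, 16]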
1264 1263 def findincoming(self, remote, base=None, heads=None, force=False):
1265 1264 """Return list of roots of the subsets of missing nodes from remote
1266 1265
1267 1266 If base dict is specified, assume that these nodes and their parents
1268 1267 exist on the remote side and that no child of a node of base exists
1269 1268 in both remote and self.
1270 1269 Furthermore base will be updated to include the nodes that exist
1271 1270 in self and remote but have no children that exist in both.
1272 1271 If a list of heads is specified, return only nodes which are heads
1273 1272 or ancestors of these heads.
1274 1273
1275 1274 All the ancestors of base are in self and in remote.
1276 1275 All the descendants of the list returned are missing in self.
1277 1276 (and so we know that the rest of the nodes are missing in remote, see
1278 1277 outgoing)
1279 1278 """
1280 1279 m = self.changelog.nodemap
1281 1280 search = []
1282 1281 fetch = {}
1283 1282 seen = {}
1284 1283 seenbranch = {}
1285 1284 if base == None:
1286 1285 base = {}
1287 1286
1288 1287 if not heads:
1289 1288 heads = remote.heads()
1290 1289
1291 1290 if self.changelog.tip() == nullid:
1292 1291 base[nullid] = 1
1293 1292 if heads != [nullid]:
1294 1293 return [nullid]
1295 1294 return []
1296 1295
1297 1296 # assume we're closer to the tip than the root
1298 1297 # and start by examining the heads
1299 1298 self.ui.status(_("searching for changes\n"))
1300 1299
1301 1300 unknown = []
1302 1301 for h in heads:
1303 1302 if h not in m:
1304 1303 unknown.append(h)
1305 1304 else:
1306 1305 base[h] = 1
1307 1306
1308 1307 if not unknown:
1309 1308 return []
1310 1309
1311 1310 req = dict.fromkeys(unknown)
1312 1311 reqcnt = 0
1313 1312
1314 1313 # search through remote branches
1315 1314 # a 'branch' here is a linear segment of history, with four parts:
1316 1315 # head, root, first parent, second parent
1317 1316 # (a branch always has two parents (or none) by definition)
1318 1317 unknown = remote.branches(unknown)
1319 1318 while unknown:
1320 1319 r = []
1321 1320 while unknown:
1322 1321 n = unknown.pop(0)
1323 1322 if n[0] in seen:
1324 1323 continue
1325 1324
1326 1325 self.ui.debug(_("examining %s:%s\n")
1327 1326 % (short(n[0]), short(n[1])))
1328 1327 if n[0] == nullid: # found the end of the branch
1329 1328 pass
1330 1329 elif n in seenbranch:
1331 1330 self.ui.debug(_("branch already found\n"))
1332 1331 continue
1333 1332 elif n[1] and n[1] in m: # do we know the base?
1334 1333 self.ui.debug(_("found incomplete branch %s:%s\n")
1335 1334 % (short(n[0]), short(n[1])))
1336 1335 search.append(n) # schedule branch range for scanning
1337 1336 seenbranch[n] = 1
1338 1337 else:
1339 1338 if n[1] not in seen and n[1] not in fetch:
1340 1339 if n[2] in m and n[3] in m:
1341 1340 self.ui.debug(_("found new changeset %s\n") %
1342 1341 short(n[1]))
1343 1342 fetch[n[1]] = 1 # earliest unknown
1344 1343 for p in n[2:4]:
1345 1344 if p in m:
1346 1345 base[p] = 1 # latest known
1347 1346
1348 1347 for p in n[2:4]:
1349 1348 if p not in req and p not in m:
1350 1349 r.append(p)
1351 1350 req[p] = 1
1352 1351 seen[n[0]] = 1
1353 1352
1354 1353 if r:
1355 1354 reqcnt += 1
1356 1355 self.ui.debug(_("request %d: %s\n") %
1357 1356 (reqcnt, " ".join(map(short, r))))
1358 1357 for p in xrange(0, len(r), 10):
1359 1358 for b in remote.branches(r[p:p+10]):
1360 1359 self.ui.debug(_("received %s:%s\n") %
1361 1360 (short(b[0]), short(b[1])))
1362 1361 unknown.append(b)
1363 1362
1364 1363 # do binary search on the branches we found
1365 1364 while search:
1366 1365 n = search.pop(0)
1367 1366 reqcnt += 1
1368 1367 l = remote.between([(n[0], n[1])])[0]
1369 1368 l.append(n[1])
1370 1369 p = n[0]
1371 1370 f = 1
1372 1371 for i in l:
1373 1372 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1374 1373 if i in m:
1375 1374 if f <= 2:
1376 1375 self.ui.debug(_("found new branch changeset %s\n") %
1377 1376 short(p))
1378 1377 fetch[p] = 1
1379 1378 base[i] = 1
1380 1379 else:
1381 1380 self.ui.debug(_("narrowed branch search to %s:%s\n")
1382 1381 % (short(p), short(i)))
1383 1382 search.append((p, i))
1384 1383 break
1385 1384 p, f = i, f * 2
1386 1385
1387 1386 # sanity check our fetch list
1388 1387 for f in fetch.keys():
1389 1388 if f in m:
1390 1389 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1391 1390
1392 1391 if base.keys() == [nullid]:
1393 1392 if force:
1394 1393 self.ui.warn(_("warning: repository is unrelated\n"))
1395 1394 else:
1396 1395 raise util.Abort(_("repository is unrelated"))
1397 1396
1398 1397 self.ui.debug(_("found new changesets starting at ") +
1399 1398 " ".join([short(f) for f in fetch]) + "\n")
1400 1399
1401 1400 self.ui.debug(_("%d total queries\n") % reqcnt)
1402 1401
1403 1402 return fetch.keys()
1404 1403
1405 1404 def findoutgoing(self, remote, base=None, heads=None, force=False):
1406 1405 """Return list of nodes that are roots of subsets not in remote
1407 1406
1408 1407 If base dict is specified, assume that these nodes and their parents
1409 1408 exist on the remote side.
1410 1409 If a list of heads is specified, return only nodes which are heads
1411 1410 or ancestors of these heads, and return a second element which
1412 1411 contains all remote heads which get new children.
1413 1412 """
1414 1413 if base == None:
1415 1414 base = {}
1416 1415 self.findincoming(remote, base, heads, force=force)
1417 1416
1418 1417 self.ui.debug(_("common changesets up to ")
1419 1418 + " ".join(map(short, base.keys())) + "\n")
1420 1419
1421 1420 remain = dict.fromkeys(self.changelog.nodemap)
1422 1421
1423 1422 # prune everything remote has from the tree
1424 1423 del remain[nullid]
1425 1424 remove = base.keys()
1426 1425 while remove:
1427 1426 n = remove.pop(0)
1428 1427 if n in remain:
1429 1428 del remain[n]
1430 1429 for p in self.changelog.parents(n):
1431 1430 remove.append(p)
1432 1431
1433 1432 # find every node whose parents have been pruned
1434 1433 subset = []
1435 1434 # find every remote head that will get new children
1436 1435 updated_heads = {}
1437 1436 for n in remain:
1438 1437 p1, p2 = self.changelog.parents(n)
1439 1438 if p1 not in remain and p2 not in remain:
1440 1439 subset.append(n)
1441 1440 if heads:
1442 1441 if p1 in heads:
1443 1442 updated_heads[p1] = True
1444 1443 if p2 in heads:
1445 1444 updated_heads[p2] = True
1446 1445
1447 1446 # this is the set of all roots we have to push
1448 1447 if heads:
1449 1448 return subset, updated_heads.keys()
1450 1449 else:
1451 1450 return subset
1452 1451
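The pruning in findoutgoing() reduces to: delete everything reachable from
the common bases by following parents, then the survivors whose parents
were all pruned are the roots that must be pushed. A self-contained sketch
over a parent map (names invented):

    def outgoing_roots(parents, nodes, bases):
        # parents: dict node -> (p1, p2); nodes: all local nodes
        remain = set(nodes)
        work = list(bases)
        while work:                      # prune what remote already has
            n = work.pop()
            if n in remain:
                remain.remove(n)
                work.extend(parents[n])
        return [n for n in remain
                if all(p not in remain for p in parents[n])]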
1453 1452 def pull(self, remote, heads=None, force=False):
1454 1453 lock = self.lock()
1455 1454 try:
1456 1455 fetch = self.findincoming(remote, heads=heads, force=force)
1457 1456 if fetch == [nullid]:
1458 1457 self.ui.status(_("requesting all changes\n"))
1459 1458
1460 1459 if not fetch:
1461 1460 self.ui.status(_("no changes found\n"))
1462 1461 return 0
1463 1462
1464 1463 if heads is None:
1465 1464 cg = remote.changegroup(fetch, 'pull')
1466 1465 else:
1467 1466 if 'changegroupsubset' not in remote.capabilities:
1468 1467 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1469 1468 cg = remote.changegroupsubset(fetch, heads, 'pull')
1470 1469 return self.addchangegroup(cg, 'pull', remote.url())
1471 1470 finally:
1472 1471 del lock
1473 1472
1474 1473 def push(self, remote, force=False, revs=None):
1475 1474 # there are two ways to push to remote repo:
1476 1475 #
1477 1476 # addchangegroup assumes local user can lock remote
1478 1477 # repo (local filesystem, old ssh servers).
1479 1478 #
1480 1479 # unbundle assumes local user cannot lock remote repo (new ssh
1481 1480 # servers, http servers).
1482 1481
1483 1482 if remote.capable('unbundle'):
1484 1483 return self.push_unbundle(remote, force, revs)
1485 1484 return self.push_addchangegroup(remote, force, revs)
1486 1485
1487 1486 def prepush(self, remote, force, revs):
1488 1487 base = {}
1489 1488 remote_heads = remote.heads()
1490 1489 inc = self.findincoming(remote, base, remote_heads, force=force)
1491 1490
1492 1491 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1493 1492 if revs is not None:
1494 1493 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1495 1494 else:
1496 1495 bases, heads = update, self.changelog.heads()
1497 1496
1498 1497 if not bases:
1499 1498 self.ui.status(_("no changes found\n"))
1500 1499 return None, 1
1501 1500 elif not force:
1502 1501 # check if we're creating new remote heads
1503 1502 # to be a remote head after push, node must be either
1504 1503 # - unknown locally
1505 1504 # - a local outgoing head descended from update
1506 1505 # - a remote head that's known locally and not
1507 1506 # ancestral to an outgoing head
1508 1507
1509 1508 warn = 0
1510 1509
1511 1510 if remote_heads == [nullid]:
1512 1511 warn = 0
1513 1512 elif not revs and len(heads) > len(remote_heads):
1514 1513 warn = 1
1515 1514 else:
1516 1515 newheads = list(heads)
1517 1516 for r in remote_heads:
1518 1517 if r in self.changelog.nodemap:
1519 1518 desc = self.changelog.heads(r, heads)
1520 1519 l = [h for h in heads if h in desc]
1521 1520 if not l:
1522 1521 newheads.append(r)
1523 1522 else:
1524 1523 newheads.append(r)
1525 1524 if len(newheads) > len(remote_heads):
1526 1525 warn = 1
1527 1526
1528 1527 if warn:
1529 1528 self.ui.warn(_("abort: push creates new remote heads!\n"))
1530 1529 self.ui.status(_("(did you forget to merge?"
1531 1530 " use push -f to force)\n"))
1532 1531 return None, 0
1533 1532 elif inc:
1534 1533 self.ui.warn(_("note: unsynced remote changes!\n"))
1535 1534
1536 1535
1537 1536 if revs is None:
1538 1537 cg = self.changegroup(update, 'push')
1539 1538 else:
1540 1539 cg = self.changegroupsubset(update, revs, 'push')
1541 1540 return cg, remote_heads
1542 1541
1543 1542 def push_addchangegroup(self, remote, force, revs):
1544 1543 lock = remote.lock()
1545 1544 try:
1546 1545 ret = self.prepush(remote, force, revs)
1547 1546 if ret[0] is not None:
1548 1547 cg, remote_heads = ret
1549 1548 return remote.addchangegroup(cg, 'push', self.url())
1550 1549 return ret[1]
1551 1550 finally:
1552 1551 del lock
1553 1552
1554 1553 def push_unbundle(self, remote, force, revs):
1555 1554 # local repo finds heads on server, finds out what revs it
1556 1555 # must push. once revs transferred, if server finds it has
1557 1556 # different heads (someone else won commit/push race), server
1558 1557 # aborts.
1559 1558
1560 1559 ret = self.prepush(remote, force, revs)
1561 1560 if ret[0] is not None:
1562 1561 cg, remote_heads = ret
1563 1562 if force: remote_heads = ['force']
1564 1563 return remote.unbundle(cg, remote_heads, 'push')
1565 1564 return ret[1]
1566 1565
1567 1566 def changegroupinfo(self, nodes, source):
1568 1567 if self.ui.verbose or source == 'bundle':
1569 1568 self.ui.status(_("%d changesets found\n") % len(nodes))
1570 1569 if self.ui.debugflag:
1571 1570 self.ui.debug(_("List of changesets:\n"))
1572 1571 for node in nodes:
1573 1572 self.ui.debug("%s\n" % hex(node))
1574 1573
1575 1574 def changegroupsubset(self, bases, heads, source, extranodes=None):
1576 1575 """This function generates a changegroup consisting of all the nodes
1577 1576 that are descendants of any of the bases, and ancestors of any of
1578 1577 the heads.
1579 1578
1580 1579 It is fairly complex as determining which filenodes and which
1581 1580 manifest nodes need to be included for the changeset to be complete
1582 1581 is non-trivial.
1583 1582
1584 1583 Another wrinkle is doing the reverse, figuring out which changeset in
1585 1584 the changegroup a particular filenode or manifestnode belongs to.
1586 1585
1587 1586 The caller can specify some nodes that must be included in the
1588 1587 changegroup using the extranodes argument. It should be a dict
1589 1588 where the keys are the filenames (or 1 for the manifest), and the
1590 1589 values are lists of (node, linknode) tuples, where node is a wanted
1591 1590 node and linknode is the changelog node that should be transmitted as
1592 1591 the linkrev.
1593 1592 """
1594 1593
1595 1594 self.hook('preoutgoing', throw=True, source=source)
1596 1595
1597 1596 # Set up some initial variables
1598 1597 # Make it easy to refer to self.changelog
1599 1598 cl = self.changelog
1600 1599 # msng is short for missing - compute the list of changesets in this
1601 1600 # changegroup.
1602 1601 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1603 1602 self.changegroupinfo(msng_cl_lst, source)
1604 1603 # Some bases may turn out to be superfluous, and some heads may be
1605 1604 # too. nodesbetween will return the minimal set of bases and heads
1606 1605 # necessary to re-create the changegroup.
1607 1606
1608 1607 # Known heads are the list of heads that it is assumed the recipient
1609 1608 # of this changegroup will know about.
1610 1609 knownheads = {}
1611 1610 # We assume that all parents of bases are known heads.
1612 1611 for n in bases:
1613 1612 for p in cl.parents(n):
1614 1613 if p != nullid:
1615 1614 knownheads[p] = 1
1616 1615 knownheads = knownheads.keys()
1617 1616 if knownheads:
1618 1617 # Now that we know what heads are known, we can compute which
1619 1618 # changesets are known. The recipient must know about all
1620 1619 # changesets required to reach the known heads from the null
1621 1620 # changeset.
1622 1621 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1623 1622 junk = None
1624 1623 # Transform the list into an ersatz set.
1625 1624 has_cl_set = dict.fromkeys(has_cl_set)
1626 1625 else:
1627 1626 # If there were no known heads, the recipient cannot be assumed to
1628 1627 # know about any changesets.
1629 1628 has_cl_set = {}
1630 1629
1631 1630 # Make it easy to refer to self.manifest
1632 1631 mnfst = self.manifest
1633 1632 # We don't know which manifests are missing yet
1634 1633 msng_mnfst_set = {}
1635 1634 # Nor do we know which filenodes are missing.
1636 1635 msng_filenode_set = {}
1637 1636
1638 1637 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1639 1638 junk = None
1640 1639
1641 1640 # A changeset always belongs to itself, so the changenode lookup
1642 1641 # function for a changenode is identity.
1643 1642 def identity(x):
1644 1643 return x
1645 1644
1646 1645 # A function generating function. Sets up an environment for the
1647 1646 # inner function.
1648 1647 def cmp_by_rev_func(revlog):
1649 1648 # Compare two nodes by their revision number in the environment's
1650 1649 # revision history. Since the revision number both represents the
1651 1650 # most efficient order to read the nodes in, and represents a
1652 1651 # topological sorting of the nodes, this function is often useful.
1653 1652 def cmp_by_rev(a, b):
1654 1653 return cmp(revlog.rev(a), revlog.rev(b))
1655 1654 return cmp_by_rev
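        # For illustration: the comparator factory above is used further down
        # as, e.g., msng_mnfst_lst.sort(cmp_by_rev_func(mnfst)), which orders
        # nodes by mnfst.rev(node) -- both the cheapest order to read them in
        # and a topological order, as the comment above notes.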
1656 1655
1657 1656 # If we determine that a particular file or manifest node must be a
1658 1657 # node that the recipient of the changegroup will already have, we can
1659 1658 # also assume the recipient will have all the parents. This function
1660 1659 # prunes them from the set of missing nodes.
1661 1660 def prune_parents(revlog, hasset, msngset):
1662 1661 haslst = hasset.keys()
1663 1662 haslst.sort(cmp_by_rev_func(revlog))
1664 1663 for node in haslst:
1665 1664 parentlst = [p for p in revlog.parents(node) if p != nullid]
1666 1665 while parentlst:
1667 1666 n = parentlst.pop()
1668 1667 if n not in hasset:
1669 1668 hasset[n] = 1
1670 1669 p = [p for p in revlog.parents(n) if p != nullid]
1671 1670 parentlst.extend(p)
1672 1671 for n in hasset:
1673 1672 msngset.pop(n, None)
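        # For intuition (hypothetical nodes): if the recipient is known to
        # have some node N, every ancestor of N is marked as present too and
        # dropped from the missing set, since a revlog cannot contain a node
        # without also containing its ancestors.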
1674 1673
1675 1674 # This is a function generating function used to set up an environment
1676 1675 # for the inner function to execute in.
1677 1676 def manifest_and_file_collector(changedfileset):
1678 1677 # This is an information gathering function that gathers
1679 1678 # information from each changeset node that goes out as part of
1680 1679 # the changegroup. The information gathered is a list of which
1681 1680 # manifest nodes are potentially required (the recipient may
1682 1681 # already have them) and total list of all files which were
1683 1682 # changed in any changeset in the changegroup.
1684 1683 #
1685 1684 # We also remember the first changenode we saw each manifest
1686 1685 # referenced by, so we can later determine which changenode 'owns'
1687 1686 # the manifest.
1688 1687 def collect_manifests_and_files(clnode):
1689 1688 c = cl.read(clnode)
1690 1689 for f in c[3]:
1691 1690 # This is to make sure we only have one instance of each
1692 1691 # filename string for each filename.
1693 1692 changedfileset.setdefault(f, f)
1694 1693 msng_mnfst_set.setdefault(c[0], clnode)
1695 1694 return collect_manifests_and_files
1696 1695
1697 1696 # Figure out which manifest nodes (of the ones we think might be part
1698 1697 # of the changegroup) the recipient must know about and remove them
1699 1698 # from the changegroup.
1700 1699 def prune_manifests():
1701 1700 has_mnfst_set = {}
1702 1701 for n in msng_mnfst_set:
1703 1702 # If a 'missing' manifest thinks it belongs to a changenode
1704 1703 # the recipient is assumed to have, obviously the recipient
1705 1704 # must have that manifest.
1706 1705 linknode = cl.node(mnfst.linkrev(n))
1707 1706 if linknode in has_cl_set:
1708 1707 has_mnfst_set[n] = 1
1709 1708 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1710 1709
1711 1710 # Use the information collected in collect_manifests_and_files to say
1712 1711 # which changenode any manifestnode belongs to.
1713 1712 def lookup_manifest_link(mnfstnode):
1714 1713 return msng_mnfst_set[mnfstnode]
1715 1714
1716 1715 # A function generating function that sets up the initial environment
1717 1716 # for the inner function.
1718 1717 def filenode_collector(changedfiles):
1719 1718 next_rev = [0]
1720 1719 # This gathers information from each manifestnode included in the
1721 1720 # changegroup about which filenodes the manifest node references
1722 1721 # so we can include those in the changegroup too.
1723 1722 #
1724 1723 # It also remembers which changenode each filenode belongs to. It
1725 1724 # does this by assuming that a filenode belongs to the changenode
1726 1725 # that the first manifest referencing it belongs to.
1727 1726 def collect_msng_filenodes(mnfstnode):
1728 1727 r = mnfst.rev(mnfstnode)
1729 1728 if r == next_rev[0]:
1730 1729 # If the last rev we looked at was the one just previous,
1731 1730 # we only need to see a diff.
1732 1731 deltamf = mnfst.readdelta(mnfstnode)
1733 1732 # For each line in the delta
1734 1733 for f, fnode in deltamf.items():
1735 1734 f = changedfiles.get(f, None)
1736 1735 # And if the file is in the list of files we care
1737 1736 # about.
1738 1737 if f is not None:
1739 1738 # Get the changenode this manifest belongs to
1740 1739 clnode = msng_mnfst_set[mnfstnode]
1741 1740 # Create the set of filenodes for the file if
1742 1741 # there isn't one already.
1743 1742 ndset = msng_filenode_set.setdefault(f, {})
1744 1743 # And set the filenode's changelog node to the
1745 1744 # manifest's if it hasn't been set already.
1746 1745 ndset.setdefault(fnode, clnode)
1747 1746 else:
1748 1747 # Otherwise we need a full manifest.
1749 1748 m = mnfst.read(mnfstnode)
1750 1749 # For every file we care about.
1751 1750 for f in changedfiles:
1752 1751 fnode = m.get(f, None)
1753 1752 # If it's in the manifest
1754 1753 if fnode is not None:
1755 1754 # See comments above.
1756 1755 clnode = msng_mnfst_set[mnfstnode]
1757 1756 ndset = msng_filenode_set.setdefault(f, {})
1758 1757 ndset.setdefault(fnode, clnode)
1759 1758 # Remember the revision we hope to see next.
1760 1759 next_rev[0] = r + 1
1761 1760 return collect_msng_filenodes
1762 1761
1763 1762 # We have a list of filenodes we think we need for a file; let's remove
1764 1763 # all those we know the recipient must have.
1765 1764 def prune_filenodes(f, filerevlog):
1766 1765 msngset = msng_filenode_set[f]
1767 1766 hasset = {}
1768 1767 # If a 'missing' filenode thinks it belongs to a changenode we
1769 1768 # assume the recipient must have, then the recipient must have
1770 1769 # that filenode.
1771 1770 for n in msngset:
1772 1771 clnode = cl.node(filerevlog.linkrev(n))
1773 1772 if clnode in has_cl_set:
1774 1773 hasset[n] = 1
1775 1774 prune_parents(filerevlog, hasset, msngset)
1776 1775
1777 1776 # A function generating function that sets up a context for the
1778 1777 # inner function.
1779 1778 def lookup_filenode_link_func(fname):
1780 1779 msngset = msng_filenode_set[fname]
1781 1780 # Lookup the changenode the filenode belongs to.
1782 1781 def lookup_filenode_link(fnode):
1783 1782 return msngset[fnode]
1784 1783 return lookup_filenode_link
1785 1784
1786 1785 # Add the nodes that were explicitly requested.
1787 1786 def add_extra_nodes(name, nodes):
1788 1787 if not extranodes or name not in extranodes:
1789 1788 return
1790 1789
1791 1790 for node, linknode in extranodes[name]:
1792 1791 if node not in nodes:
1793 1792 nodes[node] = linknode
1794 1793
1795 1794 # Now that we have all these utility functions to help out and
1796 1795 # logically divide up the task, generate the group.
1797 1796 def gengroup():
1798 1797 # The set of changed files starts empty.
1799 1798 changedfiles = {}
1800 1799 # Create a changenode group generator that will call our functions
1801 1800 # back to lookup the owning changenode and collect information.
1802 1801 group = cl.group(msng_cl_lst, identity,
1803 1802 manifest_and_file_collector(changedfiles))
1804 1803 for chnk in group:
1805 1804 yield chnk
1806 1805
1807 1806 # The list of manifests has been collected by the generator
1808 1807 # calling our functions back.
1809 1808 prune_manifests()
1810 1809 add_extra_nodes(1, msng_mnfst_set)
1811 1810 msng_mnfst_lst = msng_mnfst_set.keys()
1812 1811 # Sort the manifestnodes by revision number.
1813 1812 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1814 1813 # Create a generator for the manifestnodes that calls our lookup
1815 1814 # and data collection functions back.
1816 1815 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1817 1816 filenode_collector(changedfiles))
1818 1817 for chnk in group:
1819 1818 yield chnk
1820 1819
1821 1820 # These are no longer needed, dereference and toss the memory for
1822 1821 # them.
1823 1822 msng_mnfst_lst = None
1824 1823 msng_mnfst_set.clear()
1825 1824
1826 1825 if extranodes:
1827 1826 for fname in extranodes:
1828 1827 if isinstance(fname, int):
1829 1828 continue
1830 1829 add_extra_nodes(fname,
1831 1830 msng_filenode_set.setdefault(fname, {}))
1832 1831 changedfiles[fname] = 1
1833 1832 changedfiles = changedfiles.keys()
1834 1833 changedfiles.sort()
1835 1834 # Go through all our files in order sorted by name.
1836 1835 for fname in changedfiles:
1837 1836 filerevlog = self.file(fname)
1838 1837 if filerevlog.count() == 0:
1839 1838 raise util.Abort(_("empty or missing revlog for %s") % fname)
1840 1839 # Toss out the filenodes that the recipient isn't really
1841 1840 # missing.
1842 1841 if fname in msng_filenode_set:
1843 1842 prune_filenodes(fname, filerevlog)
1844 1843 msng_filenode_lst = msng_filenode_set[fname].keys()
1845 1844 else:
1846 1845 msng_filenode_lst = []
1847 1846 # If any filenodes are left, generate the group for them,
1848 1847 # otherwise don't bother.
1849 1848 if len(msng_filenode_lst) > 0:
1850 1849 yield changegroup.chunkheader(len(fname))
1851 1850 yield fname
1852 1851 # Sort the filenodes by their revision #
1853 1852 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1854 1853 # Create a group generator and only pass in a changenode
1855 1854 # lookup function as we need to collect no information
1856 1855 # from filenodes.
1857 1856 group = filerevlog.group(msng_filenode_lst,
1858 1857 lookup_filenode_link_func(fname))
1859 1858 for chnk in group:
1860 1859 yield chnk
1861 1860 if fname in msng_filenode_set:
1862 1861 # Don't need this anymore, toss it to free memory.
1863 1862 del msng_filenode_set[fname]
1864 1863 # Signal that no more groups are left.
1865 1864 yield changegroup.closechunk()
1866 1865
1867 1866 if msng_cl_lst:
1868 1867 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1869 1868
1870 1869 return util.chunkbuffer(gengroup())
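    # A minimal usage sketch (hypothetical nodes, not part of the original
    # code):
    #   cg = repo.changegroupsubset([base_node], [head_node], 'pull')
    # yields a chunkbuffer whose stream can be consumed by addchangegroup()
    # on the receiving side.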
1871 1870
1872 1871 def changegroup(self, basenodes, source):
1873 1872 """Generate a changegroup of all nodes that we have that a recipient
1874 1873 doesn't.
1875 1874
1876 1875 This is much easier than the previous function as we can assume that
1877 1876 the recipient has any changenode we aren't sending them."""
1878 1877
1879 1878 self.hook('preoutgoing', throw=True, source=source)
1880 1879
1881 1880 cl = self.changelog
1882 1881 nodes = cl.nodesbetween(basenodes, None)[0]
1883 1882 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1884 1883 self.changegroupinfo(nodes, source)
1885 1884
1886 1885 def identity(x):
1887 1886 return x
1888 1887
1889 1888 def gennodelst(revlog):
1890 1889 for r in xrange(0, revlog.count()):
1891 1890 n = revlog.node(r)
1892 1891 if revlog.linkrev(n) in revset:
1893 1892 yield n
1894 1893
1895 1894 def changed_file_collector(changedfileset):
1896 1895 def collect_changed_files(clnode):
1897 1896 c = cl.read(clnode)
1898 1897 for fname in c[3]:
1899 1898 changedfileset[fname] = 1
1900 1899 return collect_changed_files
1901 1900
1902 1901 def lookuprevlink_func(revlog):
1903 1902 def lookuprevlink(n):
1904 1903 return cl.node(revlog.linkrev(n))
1905 1904 return lookuprevlink
1906 1905
1907 1906 def gengroup():
1908 1907 # construct a list of all changed files
1909 1908 changedfiles = {}
1910 1909
1911 1910 for chnk in cl.group(nodes, identity,
1912 1911 changed_file_collector(changedfiles)):
1913 1912 yield chnk
1914 1913 changedfiles = changedfiles.keys()
1915 1914 changedfiles.sort()
1916 1915
1917 1916 mnfst = self.manifest
1918 1917 nodeiter = gennodelst(mnfst)
1919 1918 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1920 1919 yield chnk
1921 1920
1922 1921 for fname in changedfiles:
1923 1922 filerevlog = self.file(fname)
1924 1923 if filerevlog.count() == 0:
1925 1924 raise util.Abort(_("empty or missing revlog for %s") % fname)
1926 1925 nodeiter = gennodelst(filerevlog)
1927 1926 nodeiter = list(nodeiter)
1928 1927 if nodeiter:
1929 1928 yield changegroup.chunkheader(len(fname))
1930 1929 yield fname
1931 1930 lookup = lookuprevlink_func(filerevlog)
1932 1931 for chnk in filerevlog.group(nodeiter, lookup):
1933 1932 yield chnk
1934 1933
1935 1934 yield changegroup.closechunk()
1936 1935
1937 1936 if nodes:
1938 1937 self.hook('outgoing', node=hex(nodes[0]), source=source)
1939 1938
1940 1939 return util.chunkbuffer(gengroup())
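    # Usage sketch (hypothetical node): for a push where common_node is
    # known to exist remotely,
    #   cg = repo.changegroup([common_node], 'push')
    # generates everything the recipient is missing, per the docstring above.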
1941 1940
1942 1941 def addchangegroup(self, source, srctype, url, emptyok=False):
1943 1942 """add changegroup to repo.
1944 1943
1945 1944 return values:
1946 1945 - nothing changed or no source: 0
1947 1946 - more heads than before: 1+added heads (2..n)
1948 1947 - fewer heads than before: -1-removed heads (-2..-n)
1949 1948 - number of heads stays the same: 1
1950 1949 """
1951 1950 def csmap(x):
1952 1951 self.ui.debug(_("add changeset %s\n") % short(x))
1953 1952 return cl.count()
1954 1953
1955 1954 def revmap(x):
1956 1955 return cl.rev(x)
1957 1956
1958 1957 if not source:
1959 1958 return 0
1960 1959
1961 1960 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1962 1961
1963 1962 changesets = files = revisions = 0
1964 1963
1965 1964 # write changelog data to temp files so concurrent readers will not see
1966 1965 # inconsistent view
1967 1966 cl = self.changelog
1968 1967 cl.delayupdate()
1969 1968 oldheads = len(cl.heads())
1970 1969
1971 1970 tr = self.transaction()
1972 1971 try:
1973 1972 trp = weakref.proxy(tr)
1974 1973 # pull off the changeset group
1975 1974 self.ui.status(_("adding changesets\n"))
1976 1975 cor = cl.count() - 1
1977 1976 chunkiter = changegroup.chunkiter(source)
1978 1977 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1979 1978 raise util.Abort(_("received changelog group is empty"))
1980 1979 cnr = cl.count() - 1
1981 1980 changesets = cnr - cor
1982 1981
1983 1982 # pull off the manifest group
1984 1983 self.ui.status(_("adding manifests\n"))
1985 1984 chunkiter = changegroup.chunkiter(source)
1986 1985 # no need to check for empty manifest group here:
1987 1986 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1988 1987 # no new manifest will be created and the manifest group will
1989 1988 # be empty during the pull
1990 1989 self.manifest.addgroup(chunkiter, revmap, trp)
1991 1990
1992 1991 # process the files
1993 1992 self.ui.status(_("adding file changes\n"))
1994 1993 while 1:
1995 1994 f = changegroup.getchunk(source)
1996 1995 if not f:
1997 1996 break
1998 1997 self.ui.debug(_("adding %s revisions\n") % f)
1999 1998 fl = self.file(f)
2000 1999 o = fl.count()
2001 2000 chunkiter = changegroup.chunkiter(source)
2002 2001 if fl.addgroup(chunkiter, revmap, trp) is None:
2003 2002 raise util.Abort(_("received file revlog group is empty"))
2004 2003 revisions += fl.count() - o
2005 2004 files += 1
2006 2005
2007 2006 # make changelog see real files again
2008 2007 cl.finalize(trp)
2009 2008
2010 2009 newheads = len(self.changelog.heads())
2011 2010 heads = ""
2012 2011 if oldheads and newheads != oldheads:
2013 2012 heads = _(" (%+d heads)") % (newheads - oldheads)
2014 2013
2015 2014 self.ui.status(_("added %d changesets"
2016 2015 " with %d changes to %d files%s\n")
2017 2016 % (changesets, revisions, files, heads))
2018 2017
2019 2018 if changesets > 0:
2020 2019 self.hook('pretxnchangegroup', throw=True,
2021 2020 node=hex(self.changelog.node(cor+1)), source=srctype,
2022 2021 url=url)
2023 2022
2024 2023 tr.close()
2025 2024 finally:
2026 2025 del tr
2027 2026
2028 2027 if changesets > 0:
2029 2028 # forcefully update the on-disk branch cache
2030 2029 self.ui.debug(_("updating the branch cache\n"))
2031 2030 self.branchtags()
2032 2031 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2033 2032 source=srctype, url=url)
2034 2033
2035 2034 for i in xrange(cor + 1, cnr + 1):
2036 2035 self.hook("incoming", node=hex(self.changelog.node(i)),
2037 2036 source=srctype, url=url)
2038 2037
2039 2038 # never return 0 here:
2040 2039 if newheads < oldheads:
2041 2040 return newheads - oldheads - 1
2042 2041 else:
2043 2042 return newheads - oldheads + 1
2044 2043
2045 2044
2046 2045 def stream_in(self, remote):
2047 2046 fp = remote.stream_out()
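        # The wire format parsed below: a status line holding an integer
        # response code, then a "<total_files> <total_bytes>" line, then for
        # each file a "<name>\0<size>" header followed by <size> bytes of
        # raw data.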
2048 2047 l = fp.readline()
2049 2048 try:
2050 2049 resp = int(l)
2051 2050 except ValueError:
2052 2051 raise util.UnexpectedOutput(
2053 2052 _('Unexpected response from remote server:'), l)
2054 2053 if resp == 1:
2055 2054 raise util.Abort(_('operation forbidden by server'))
2056 2055 elif resp == 2:
2057 2056 raise util.Abort(_('locking the remote repository failed'))
2058 2057 elif resp != 0:
2059 2058 raise util.Abort(_('the server sent an unknown error code'))
2060 2059 self.ui.status(_('streaming all changes\n'))
2061 2060 l = fp.readline()
2062 2061 try:
2063 2062 total_files, total_bytes = map(int, l.split(' ', 1))
2064 2063 except (ValueError, TypeError):
2065 2064 raise util.UnexpectedOutput(
2066 2065 _('Unexpected response from remote server:'), l)
2067 2066 self.ui.status(_('%d files to transfer, %s of data\n') %
2068 2067 (total_files, util.bytecount(total_bytes)))
2069 2068 start = time.time()
2070 2069 for i in xrange(total_files):
2071 2070 # XXX doesn't support '\n' or '\r' in filenames
2072 2071 l = fp.readline()
2073 2072 try:
2074 2073 name, size = l.split('\0', 1)
2075 2074 size = int(size)
2076 2075 except (ValueError, TypeError):
2077 2076 raise util.UnexpectedOutput(
2078 2077 _('Unexpected response from remote server:'), l)
2079 2078 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2080 2079 ofp = self.sopener(name, 'w')
2081 2080 for chunk in util.filechunkiter(fp, limit=size):
2082 2081 ofp.write(chunk)
2083 2082 ofp.close()
2084 2083 elapsed = time.time() - start
2085 2084 if elapsed <= 0:
2086 2085 elapsed = 0.001
2087 2086 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2088 2087 (util.bytecount(total_bytes), elapsed,
2089 2088 util.bytecount(total_bytes / elapsed)))
2090 2089 self.invalidate()
2091 2090 return len(self.heads()) + 1
2092 2091
2093 2092 def clone(self, remote, heads=[], stream=False):
2094 2093 '''clone remote repository.
2095 2094
2096 2095 keyword arguments:
2097 2096 heads: list of revs to clone (forces use of pull)
2098 2097 stream: use streaming clone if possible'''
2099 2098
2100 2099 # now, all clients that can request uncompressed clones can
2101 2100 # read repo formats supported by all servers that can serve
2102 2101 # them.
2103 2102
2104 2103 # if revlog format changes, client will have to check version
2105 2104 # and format flags on "stream" capability, and use
2106 2105 # uncompressed only if compatible.
2107 2106
2108 2107 if stream and not heads and remote.capable('stream'):
2109 2108 return self.stream_in(remote)
2110 2109 return self.pull(remote, heads)
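    # Usage sketch (hypothetical remote): local.clone(remote, stream=True)
    # attempts a streaming clone and falls back to pull() when specific
    # heads were requested or the remote lacks the 'stream' capability.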
2111 2110
2112 2111 # used to avoid circular references so destructors work
2113 2112 def aftertrans(files):
2114 2113 renamefiles = [tuple(t) for t in files]
2115 2114 def a():
2116 2115 for src, dest in renamefiles:
2117 2116 util.rename(src, dest)
2118 2117 return a
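# Usage sketch (hypothetical file names): the closure returned here can be
# stashed as a transaction's post-close action, e.g.
#   after = aftertrans([('journal', 'undo')])
#   after()   # renames journal -> undo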
2119 2118
2120 2119 def instance(ui, path, create):
2121 2120 return localrepository(ui, util.drop_scheme('file', path), create)
2122 2121
2123 2122 def islocal(path):
2124 2123 return True
@@ -1,90 +1,90 b''
1 1 rm 'd/b'
2 2 assuming destination git-repo-hg
3 3 initializing destination git-repo-hg repository
4 4 scanning source...
5 5 sorting...
6 6 converting...
7 7 5 t1
8 8 4 t2
9 9 3 t3
10 10 2 t4.1
11 11 1 t4.2
12 12 0 Merge branch other
13 changeset: 5:c6d72c98aa00
13 changeset: 5:4ab1af49a271
14 14 tag: tip
15 parent: 3:a18bdfccf429
16 parent: 4:48cb5b72ce56
15 parent: 3:0222ab0998d7
16 parent: 4:5333c870e3c2
17 17 user: test <test@example.org>
18 18 date: Mon Jan 01 00:00:15 2007 +0000
19 19 files: a
20 20 description:
21 21 Merge branch other
22 22
23 23 committer: test <test@example.org>
24 24
25 25
26 26 % full conversion
27 27 o 9 "Discard change to foo" files: foo
28 28 |\
29 29 | o 8 "change foo" files: foo
30 30 | |
31 31 o | 7 "change bar" files: bar
32 32 |/
33 33 o 6 "(octopus merge fixup)" files:
34 34 |\
35 35 | o 5 "Octopus merge" files: baz
36 36 | |\
37 37 o | | 4 "add baz" files: baz
38 38 | | |
39 39 +---o 3 "add bar" files: bar
40 40 | |
41 41 o | 2 "add quux" files: quux
42 42 | |
43 43 | o 1 "change foo" files: foo
44 44 |/
45 45 o 0 "add foo" files: foo
46 46
47 47 245a3b8bc653999c2b22cdabd517ccb47aecafdf 644 bar
48 48 354ae8da6e890359ef49ade27b68bbc361f3ca88 644 baz
49 49 9277c9cc8dd4576fc01a17939b4351e5ada93466 644 foo
50 50 88dfeab657e8cf2cef3dec67b914f49791ae76b1 644 quux
51 51 % foo bar baz: octopus merge
52 52 o 8 "Discard change to foo" files: foo
53 53 |\
54 54 | o 7 "change foo" files: foo
55 55 | |
56 56 o | 6 "change bar" files: bar
57 57 |/
58 58 o 5 "(octopus merge fixup)" files:
59 59 |\
60 60 | o 4 "Octopus merge" files: baz
61 61 | |\
62 62 o | | 3 "add baz" files: baz
63 63 | | |
64 64 +---o 2 "add bar" files: bar
65 65 | |
66 66 | o 1 "change foo" files: foo
67 67 |/
68 68 o 0 "add foo" files: foo
69 69
70 70 245a3b8bc653999c2b22cdabd517ccb47aecafdf 644 bar
71 71 354ae8da6e890359ef49ade27b68bbc361f3ca88 644 baz
72 72 9277c9cc8dd4576fc01a17939b4351e5ada93466 644 foo
73 73 % foo baz quux: only some parents of an octopus merge; "discard" a head
74 74 o 6 "Discard change to foo" files: foo
75 75 |
76 76 o 5 "change foo" files: foo
77 77 |
78 78 o 4 "Octopus merge" files:
79 79 |\
80 80 | o 3 "add baz" files: baz
81 81 | |
82 82 | o 2 "add quux" files: quux
83 83 | |
84 84 o | 1 "change foo" files: foo
85 85 |/
86 86 o 0 "add foo" files: foo
87 87
88 88 354ae8da6e890359ef49ade27b68bbc361f3ca88 644 baz
89 89 9277c9cc8dd4576fc01a17939b4351e5ada93466 644 foo
90 90 88dfeab657e8cf2cef3dec67b914f49791ae76b1 644 quux