localrepo.commit: only munge comment text if committing via dirstate
Bryan O'Sullivan
r5023:7f5c3fb0 default
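This change moves the commit-message cleanup under the use_dirstate guard:
the "munging" (stripping trailing whitespace from each line, dropping
leading blank lines, and returning None for an all-whitespace message) now
runs only for dirstate-backed commits, and the extra["branch"] assignment
is hoisted ahead of it so it still applies when commit() is called with
explicit parents (the rawcommit path). A minimal sketch of the munge step,
assuming text holds the commit message:

    lines = [line.rstrip() for line in text.rstrip().splitlines()]
    while lines and not lines[0]:
        del lines[0]
    if not lines:
        return None  # message was empty or all whitespace
    text = '\n'.join(lines)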
@@ -1,1983 +1,1986
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util, extensions, hook
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __init__(self, parentui, path=None, create=0):
20 20 repo.repository.__init__(self)
21 21 self.path = path
22 22 self.root = os.path.realpath(path)
23 23 self.path = os.path.join(self.root, ".hg")
24 24 self.origroot = path
25 25 self.opener = util.opener(self.path)
26 26 self.wopener = util.opener(self.root)
27 27
28 28 if not os.path.isdir(self.path):
29 29 if create:
30 30 if not os.path.exists(path):
31 31 os.mkdir(path)
32 32 os.mkdir(self.path)
33 33 requirements = ["revlogv1"]
34 34 if parentui.configbool('format', 'usestore', True):
35 35 os.mkdir(os.path.join(self.path, "store"))
36 36 requirements.append("store")
37 37 # create an invalid changelog
38 38 self.opener("00changelog.i", "a").write(
39 39 '\0\0\0\2' # represents revlogv2
40 40 ' dummy changelog to prevent using the old repo layout'
41 41 )
42 42 reqfile = self.opener("requires", "w")
43 43 for r in requirements:
44 44 reqfile.write("%s\n" % r)
45 45 reqfile.close()
46 46 else:
47 47 raise repo.RepoError(_("repository %s not found") % path)
48 48 elif create:
49 49 raise repo.RepoError(_("repository %s already exists") % path)
50 50 else:
51 51 # find requirements
52 52 try:
53 53 requirements = self.opener("requires").read().splitlines()
54 54 except IOError, inst:
55 55 if inst.errno != errno.ENOENT:
56 56 raise
57 57 requirements = []
58 58 # check them
59 59 for r in requirements:
60 60 if r not in self.supported:
61 61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62 62
63 63 # setup store
64 64 if "store" in requirements:
65 65 self.encodefn = util.encodefilename
66 66 self.decodefn = util.decodefilename
67 67 self.spath = os.path.join(self.path, "store")
68 68 else:
69 69 self.encodefn = lambda x: x
70 70 self.decodefn = lambda x: x
71 71 self.spath = self.path
72 72 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
73 73
74 74 self.ui = ui.ui(parentui=parentui)
75 75 try:
76 76 self.ui.readconfig(self.join("hgrc"), self.root)
77 77 extensions.loadall(self.ui)
78 78 except IOError:
79 79 pass
80 80
81 81 self.tagscache = None
82 82 self.branchcache = None
83 83 self.nodetagscache = None
84 84 self.filterpats = {}
85 85 self._transref = self._lockref = self._wlockref = None
86 86
87 87 def __getattr__(self, name):
88 88 if name == 'changelog':
89 89 self.changelog = changelog.changelog(self.sopener)
90 90 self.sopener.defversion = self.changelog.version
91 91 return self.changelog
92 92 if name == 'manifest':
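# accessing self.changelog first runs the 'changelog' branch above,
# which sets sopener.defversion before the manifest revlog is created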
93 93 self.changelog
94 94 self.manifest = manifest.manifest(self.sopener)
95 95 return self.manifest
96 96 if name == 'dirstate':
97 97 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 98 return self.dirstate
99 99 else:
100 100 raise AttributeError, name
101 101
102 102 def url(self):
103 103 return 'file:' + self.root
104 104
105 105 def hook(self, name, throw=False, **args):
106 106 return hook.hook(self.ui, self, name, throw, **args)
107 107
108 108 tag_disallowed = ':\r\n'
109 109
110 110 def _tag(self, name, node, message, local, user, date, parent=None,
111 111 extra={}):
112 112 use_dirstate = parent is None
113 113
114 114 for c in self.tag_disallowed:
115 115 if c in name:
116 116 raise util.Abort(_('%r cannot be used in a tag name') % c)
117 117
118 118 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
119 119
120 120 def writetag(fp, name, munge, prevtags):
121 121 if prevtags and prevtags[-1] != '\n':
122 122 fp.write('\n')
123 123 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
124 124 fp.close()
125 125 self.hook('tag', node=hex(node), tag=name, local=local)
126 126
127 127 prevtags = ''
128 128 if local:
129 129 try:
130 130 fp = self.opener('localtags', 'r+')
131 131 except IOError, err:
132 132 fp = self.opener('localtags', 'a')
133 133 else:
134 134 prevtags = fp.read()
135 135
136 136 # local tags are stored in the current charset
137 137 writetag(fp, name, None, prevtags)
138 138 return
139 139
140 140 if use_dirstate:
141 141 try:
142 142 fp = self.wfile('.hgtags', 'rb+')
143 143 except IOError, err:
144 144 fp = self.wfile('.hgtags', 'ab')
145 145 else:
146 146 prevtags = fp.read()
147 147 else:
148 148 try:
149 149 prevtags = self.filectx('.hgtags', parent).data()
150 150 except revlog.LookupError:
151 151 pass
152 152 fp = self.wfile('.hgtags', 'wb')
153 153
154 154 # committed tags are stored in UTF-8
155 155 writetag(fp, name, util.fromlocal, prevtags)
156 156
157 157 if use_dirstate and '.hgtags' not in self.dirstate:
158 158 self.add(['.hgtags'])
159 159
160 160 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
161 161 extra=extra)
162 162
163 163 self.hook('tag', node=hex(node), tag=name, local=local)
164 164
165 165 return tagnode
166 166
167 167 def tag(self, name, node, message, local, user, date):
168 168 '''tag a revision with a symbolic name.
169 169
170 170 if local is True, the tag is stored in a per-repository file.
171 171 otherwise, it is stored in the .hgtags file, and a new
172 172 changeset is committed with the change.
173 173
174 174 keyword arguments:
175 175
176 176 local: whether to store tag in non-version-controlled file
177 177 (default False)
178 178
179 179 message: commit message to use if committing
180 180
181 181 user: name of user to use if committing
182 182
183 183 date: date tuple to use if committing'''
184 184
185 185 for x in self.status()[:5]:
186 186 if '.hgtags' in x:
187 187 raise util.Abort(_('working copy of .hgtags is changed '
188 188 '(please commit .hgtags manually)'))
189 189
190 190
191 191 self._tag(name, node, message, local, user, date)
192 192
193 193 def tags(self):
194 194 '''return a mapping of tag to node'''
195 195 if self.tagscache:
196 196 return self.tagscache
197 197
198 198 globaltags = {}
199 199
200 200 def readtags(lines, fn):
201 201 filetags = {}
202 202 count = 0
203 203
204 204 def warn(msg):
205 205 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
206 206
207 207 for l in lines:
208 208 count += 1
209 209 if not l:
210 210 continue
211 211 s = l.split(" ", 1)
212 212 if len(s) != 2:
213 213 warn(_("cannot parse entry"))
214 214 continue
215 215 node, key = s
216 216 key = util.tolocal(key.strip()) # stored in UTF-8
217 217 try:
218 218 bin_n = bin(node)
219 219 except TypeError:
220 220 warn(_("node '%s' is not well formed") % node)
221 221 continue
222 222 if bin_n not in self.changelog.nodemap:
223 223 warn(_("tag '%s' refers to unknown node") % key)
224 224 continue
225 225
226 226 h = []
227 227 if key in filetags:
228 228 n, h = filetags[key]
229 229 h.append(n)
230 230 filetags[key] = (bin_n, h)
231 231
232 232 for k, nh in filetags.items():
233 233 if k not in globaltags:
234 234 globaltags[k] = nh
235 235 continue
236 236 # we prefer the global tag if:
237 237 # it supersedes us OR
238 238 # mutual supersedes and it has a higher rank
239 239 # otherwise we win because we're tip-most
240 240 an, ah = nh
241 241 bn, bh = globaltags[k]
242 242 if (bn != an and an in bh and
243 243 (bn not in ah or len(bh) > len(ah))):
244 244 an = bn
245 245 ah.extend([n for n in bh if n not in ah])
246 246 globaltags[k] = an, ah
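# Illustrative example of the rule above: if globaltags maps t -> (n3,
# [n1, n2]) and this file maps t -> (n2, [n1]), then an=n2 is in bh, and
# bn=n3 is not in ah, so the global node n3 wins and its history is
# merged into ah, giving globaltags[t] = (n3, [n1, n2]).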
247 247
248 248 # read the tags file from each head, ending with the tip
249 249 f = None
250 250 for rev, node, fnode in self._hgtagsnodes():
251 251 f = (f and f.filectx(fnode) or
252 252 self.filectx('.hgtags', fileid=fnode))
253 253 readtags(f.data().splitlines(), f)
254 254
255 255 try:
256 256 data = util.fromlocal(self.opener("localtags").read())
257 257 # localtags are stored in the local character set
258 258 # while the internal tag table is stored in UTF-8
259 259 readtags(data.splitlines(), "localtags")
260 260 except IOError:
261 261 pass
262 262
263 263 self.tagscache = {}
264 264 for k,nh in globaltags.items():
265 265 n = nh[0]
266 266 if n != nullid:
267 267 self.tagscache[k] = n
268 268 self.tagscache['tip'] = self.changelog.tip()
269 269
270 270 return self.tagscache
271 271
272 272 def _hgtagsnodes(self):
273 273 heads = self.heads()
274 274 heads.reverse()
275 275 last = {}
276 276 ret = []
277 277 for node in heads:
278 278 c = self.changectx(node)
279 279 rev = c.rev()
280 280 try:
281 281 fnode = c.filenode('.hgtags')
282 282 except revlog.LookupError:
283 283 continue
284 284 ret.append((rev, node, fnode))
285 285 if fnode in last:
286 286 ret[last[fnode]] = None
287 287 last[fnode] = len(ret) - 1
288 288 return [item for item in ret if item]
289 289
290 290 def tagslist(self):
291 291 '''return a list of tags ordered by revision'''
292 292 l = []
293 293 for t, n in self.tags().items():
294 294 try:
295 295 r = self.changelog.rev(n)
296 296 except:
297 297 r = -2 # sort to the beginning of the list if unknown
298 298 l.append((r, t, n))
299 299 l.sort()
300 300 return [(t, n) for r, t, n in l]
301 301
302 302 def nodetags(self, node):
303 303 '''return the tags associated with a node'''
304 304 if not self.nodetagscache:
305 305 self.nodetagscache = {}
306 306 for t, n in self.tags().items():
307 307 self.nodetagscache.setdefault(n, []).append(t)
308 308 return self.nodetagscache.get(node, [])
309 309
310 310 def _branchtags(self):
311 311 partial, last, lrev = self._readbranchcache()
312 312
313 313 tiprev = self.changelog.count() - 1
314 314 if lrev != tiprev:
315 315 self._updatebranchcache(partial, lrev+1, tiprev+1)
316 316 self._writebranchcache(partial, self.changelog.tip(), tiprev)
317 317
318 318 return partial
319 319
320 320 def branchtags(self):
321 321 if self.branchcache is not None:
322 322 return self.branchcache
323 323
324 324 self.branchcache = {} # avoid recursion in changectx
325 325 partial = self._branchtags()
326 326
327 327 # the branch cache is stored on disk as UTF-8, but in the local
328 328 # charset internally
329 329 for k, v in partial.items():
330 330 self.branchcache[util.tolocal(k)] = v
331 331 return self.branchcache
332 332
333 333 def _readbranchcache(self):
334 334 partial = {}
335 335 try:
336 336 f = self.opener("branch.cache")
337 337 lines = f.read().split('\n')
338 338 f.close()
339 339 except (IOError, OSError):
340 340 return {}, nullid, nullrev
341 341
342 342 try:
343 343 last, lrev = lines.pop(0).split(" ", 1)
344 344 last, lrev = bin(last), int(lrev)
345 345 if not (lrev < self.changelog.count() and
346 346 self.changelog.node(lrev) == last): # sanity check
347 347 # invalidate the cache
348 348 raise ValueError('Invalid branch cache: unknown tip')
349 349 for l in lines:
350 350 if not l: continue
351 351 node, label = l.split(" ", 1)
352 352 partial[label.strip()] = bin(node)
353 353 except (KeyboardInterrupt, util.SignalInterrupt):
354 354 raise
355 355 except Exception, inst:
356 356 if self.ui.debugflag:
357 357 self.ui.warn(str(inst), '\n')
358 358 partial, last, lrev = {}, nullid, nullrev
359 359 return partial, last, lrev
360 360
361 361 def _writebranchcache(self, branches, tip, tiprev):
362 362 try:
363 363 f = self.opener("branch.cache", "w", atomictemp=True)
364 364 f.write("%s %s\n" % (hex(tip), tiprev))
365 365 for label, node in branches.iteritems():
366 366 f.write("%s %s\n" % (hex(node), label))
367 367 f.rename()
368 368 except (IOError, OSError):
369 369 pass
370 370
371 371 def _updatebranchcache(self, partial, start, end):
372 372 for r in xrange(start, end):
373 373 c = self.changectx(r)
374 374 b = c.branch()
375 375 partial[b] = c.node()
376 376
377 377 def lookup(self, key):
378 378 if key == '.':
379 379 key, second = self.dirstate.parents()
380 380 if key == nullid:
381 381 raise repo.RepoError(_("no revision checked out"))
382 382 if second != nullid:
383 383 self.ui.warn(_("warning: working directory has two parents, "
384 384 "tag '.' uses the first\n"))
385 385 elif key == 'null':
386 386 return nullid
387 387 n = self.changelog._match(key)
388 388 if n:
389 389 return n
390 390 if key in self.tags():
391 391 return self.tags()[key]
392 392 if key in self.branchtags():
393 393 return self.branchtags()[key]
394 394 n = self.changelog._partialmatch(key)
395 395 if n:
396 396 return n
397 397 try:
398 398 if len(key) == 20:
399 399 key = hex(key)
400 400 except:
401 401 pass
402 402 raise repo.RepoError(_("unknown revision '%s'") % key)
403 403
404 404 def dev(self):
405 405 return os.lstat(self.path).st_dev
406 406
407 407 def local(self):
408 408 return True
409 409
410 410 def join(self, f):
411 411 return os.path.join(self.path, f)
412 412
413 413 def sjoin(self, f):
414 414 f = self.encodefn(f)
415 415 return os.path.join(self.spath, f)
416 416
417 417 def wjoin(self, f):
418 418 return os.path.join(self.root, f)
419 419
420 420 def file(self, f):
421 421 if f[0] == '/':
422 422 f = f[1:]
423 423 return filelog.filelog(self.sopener, f)
424 424
425 425 def changectx(self, changeid=None):
426 426 return context.changectx(self, changeid)
427 427
428 428 def workingctx(self):
429 429 return context.workingctx(self)
430 430
431 431 def parents(self, changeid=None):
432 432 '''
433 433 get list of changectxs for parents of changeid or working directory
434 434 '''
435 435 if changeid is None:
436 436 pl = self.dirstate.parents()
437 437 else:
438 438 n = self.changelog.lookup(changeid)
439 439 pl = self.changelog.parents(n)
440 440 if pl[1] == nullid:
441 441 return [self.changectx(pl[0])]
442 442 return [self.changectx(pl[0]), self.changectx(pl[1])]
443 443
444 444 def filectx(self, path, changeid=None, fileid=None):
445 445 """changeid can be a changeset revision, node, or tag.
446 446 fileid can be a file revision or node."""
447 447 return context.filectx(self, path, changeid, fileid)
448 448
449 449 def getcwd(self):
450 450 return self.dirstate.getcwd()
451 451
452 452 def pathto(self, f, cwd=None):
453 453 return self.dirstate.pathto(f, cwd)
454 454
455 455 def wfile(self, f, mode='r'):
456 456 return self.wopener(f, mode)
457 457
458 458 def _link(self, f):
459 459 return os.path.islink(self.wjoin(f))
460 460
461 461 def _filter(self, filter, filename, data):
462 462 if filter not in self.filterpats:
463 463 l = []
464 464 for pat, cmd in self.ui.configitems(filter):
465 465 mf = util.matcher(self.root, "", [pat], [], [])[1]
466 466 l.append((mf, cmd))
467 467 self.filterpats[filter] = l
468 468
469 469 for mf, cmd in self.filterpats[filter]:
470 470 if mf(filename):
471 471 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
472 472 data = util.filter(data, cmd)
473 473 break
474 474
475 475 return data
476 476
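# Illustrative sketch (hypothetical pattern and command, not part of this
# file): given an hgrc section such as
#
#   [encode]
#   **.txt = tr -d '\r'
#
# _filter("encode", filename, data) matches filename against **.txt and
# pipes data through the command with util.filter() before wread() returns.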
477 477 def wread(self, filename):
478 478 if self._link(filename):
479 479 data = os.readlink(self.wjoin(filename))
480 480 else:
481 481 data = self.wopener(filename, 'r').read()
482 482 return self._filter("encode", filename, data)
483 483
484 484 def wwrite(self, filename, data, flags):
485 485 data = self._filter("decode", filename, data)
486 486 if "l" in flags:
487 487 self.wopener.symlink(data, filename)
488 488 else:
489 489 try:
490 490 if self._link(filename):
491 491 os.unlink(self.wjoin(filename))
492 492 except OSError:
493 493 pass
494 494 self.wopener(filename, 'w').write(data)
495 495 util.set_exec(self.wjoin(filename), "x" in flags)
496 496
497 497 def wwritedata(self, filename, data):
498 498 return self._filter("decode", filename, data)
499 499
500 500 def transaction(self):
501 501 if self._transref and self._transref():
502 502 return self._transref().nest()
503 503
504 504 # save dirstate for rollback
505 505 try:
506 506 ds = self.opener("dirstate").read()
507 507 except IOError:
508 508 ds = ""
509 509 self.opener("journal.dirstate", "w").write(ds)
510 510
511 511 renames = [(self.sjoin("journal"), self.sjoin("undo")),
512 512 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
513 513 tr = transaction.transaction(self.ui.warn, self.sopener,
514 514 self.sjoin("journal"),
515 515 aftertrans(renames))
516 516 self._transref = weakref.ref(tr)
517 517 return tr
518 518
519 519 def recover(self):
520 520 l = self.lock()
521 521 try:
522 522 if os.path.exists(self.sjoin("journal")):
523 523 self.ui.status(_("rolling back interrupted transaction\n"))
524 524 transaction.rollback(self.sopener, self.sjoin("journal"))
525 525 self.invalidate()
526 526 return True
527 527 else:
528 528 self.ui.warn(_("no interrupted transaction available\n"))
529 529 return False
530 530 finally:
531 531 del l
532 532
533 533 def rollback(self):
534 534 wlock = lock = None
535 535 try:
536 536 wlock = self.wlock()
537 537 lock = self.lock()
538 538 if os.path.exists(self.sjoin("undo")):
539 539 self.ui.status(_("rolling back last transaction\n"))
540 540 transaction.rollback(self.sopener, self.sjoin("undo"))
541 541 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
542 542 self.invalidate()
543 543 self.dirstate.invalidate()
544 544 else:
545 545 self.ui.warn(_("no rollback information available\n"))
546 546 finally:
547 547 del wlock, lock
548 548
549 549 def invalidate(self):
550 550 for a in "changelog manifest".split():
551 551 if hasattr(self, a):
552 552 self.__delattr__(a)
553 553 self.tagscache = None
554 554 self.nodetagscache = None
555 555
556 556 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
557 557 try:
558 558 l = lock.lock(lockname, 0, releasefn, desc=desc)
559 559 except lock.LockHeld, inst:
560 560 if not wait:
561 561 raise
562 562 self.ui.warn(_("waiting for lock on %s held by %r\n") %
563 563 (desc, inst.locker))
564 564 # default to 600 seconds timeout
565 565 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
566 566 releasefn, desc=desc)
567 567 if acquirefn:
568 568 acquirefn()
569 569 return l
570 570
571 571 def lock(self, wait=True):
572 572 if self._lockref and self._lockref():
573 573 return self._lockref()
574 574
575 575 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
576 576 _('repository %s') % self.origroot)
577 577 self._lockref = weakref.ref(l)
578 578 return l
579 579
580 580 def wlock(self, wait=True):
581 581 if self._wlockref and self._wlockref():
582 582 return self._wlockref()
583 583
584 584 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
585 585 self.dirstate.invalidate, _('working directory of %s') %
586 586 self.origroot)
587 587 self._wlockref = weakref.ref(l)
588 588 return l
589 589
590 590 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
591 591 """
592 592 commit an individual file as part of a larger transaction
593 593 """
594 594
595 595 t = self.wread(fn)
596 596 fl = self.file(fn)
597 597 fp1 = manifest1.get(fn, nullid)
598 598 fp2 = manifest2.get(fn, nullid)
599 599
600 600 meta = {}
601 601 cp = self.dirstate.copied(fn)
602 602 if cp:
603 603 # Mark the new revision of this file as a copy of another
604 604 # file. This copy data will effectively act as a parent
605 605 # of this new revision. If this is a merge, the first
606 606 # parent will be the nullid (meaning "look up the copy data")
607 607 # and the second one will be the other parent. For example:
608 608 #
609 609 # 0 --- 1 --- 3 rev1 changes file foo
610 610 # \ / rev2 renames foo to bar and changes it
611 611 # \- 2 -/ rev3 should have bar with all changes and
612 612 # should record that bar descends from
613 613 # bar in rev2 and foo in rev1
614 614 #
615 615 # this allows this merge to succeed:
616 616 #
617 617 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
618 618 # \ / merging rev3 and rev4 should use bar@rev2
619 619 # \- 2 --- 4 as the merge base
620 620 #
621 621 meta["copy"] = cp
622 622 if not manifest2: # not a branch merge
623 623 meta["copyrev"] = hex(manifest1.get(cp, nullid))
624 624 fp2 = nullid
625 625 elif fp2 != nullid: # copied on remote side
626 626 meta["copyrev"] = hex(manifest1.get(cp, nullid))
627 627 elif fp1 != nullid: # copied on local side, reversed
628 628 meta["copyrev"] = hex(manifest2.get(cp))
629 629 fp2 = fp1
630 630 else: # directory rename
631 631 meta["copyrev"] = hex(manifest1.get(cp, nullid))
632 632 self.ui.debug(_(" %s: copy %s:%s\n") %
633 633 (fn, cp, meta["copyrev"]))
634 634 fp1 = nullid
635 635 elif fp2 != nullid:
636 636 # is one parent an ancestor of the other?
637 637 fpa = fl.ancestor(fp1, fp2)
638 638 if fpa == fp1:
639 639 fp1, fp2 = fp2, nullid
640 640 elif fpa == fp2:
641 641 fp2 = nullid
642 642
643 643 # is the file unmodified from the parent? report existing entry
644 644 if fp2 == nullid and not fl.cmp(fp1, t):
645 645 return fp1
646 646
647 647 changelist.append(fn)
648 648 return fl.add(t, meta, tr, linkrev, fp1, fp2)
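# Illustrative: after "hg rename foo bar", committing bar reaches the
# cp branch above with cp == "foo", storing meta = {"copy": "foo",
# "copyrev": <hex of foo's filenode>} on bar's new filelog revision, so
# the copy source acts as an extra parent per the diagram above.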
649 649
650 650 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
651 651 if p1 is None:
652 652 p1, p2 = self.dirstate.parents()
653 653 return self.commit(files=files, text=text, user=user, date=date,
654 654 p1=p1, p2=p2, extra=extra)
655 655
656 656 def commit(self, files=None, text="", user=None, date=None,
657 657 match=util.always, force=False, force_editor=False,
658 658 p1=None, p2=None, extra={}):
659 659 wlock = lock = tr = None
660 660 try:
661 661 commit = []
662 662 remove = []
663 663 changed = []
664 664 use_dirstate = (p1 is None) # not rawcommit
665 665 extra = extra.copy()
666 666
667 667 if use_dirstate:
668 668 if files:
669 669 for f in files:
670 670 s = self.dirstate[f]
671 671 if s in 'nma':
672 672 commit.append(f)
673 673 elif s == 'r':
674 674 remove.append(f)
675 675 else:
676 676 self.ui.warn(_("%s not tracked!\n") % f)
677 677 else:
678 678 changes = self.status(match=match)[:5]
679 679 modified, added, removed, deleted, unknown = changes
680 680 commit = modified + added
681 681 remove = removed
682 682 else:
683 683 commit = files
684 684
685 685 if use_dirstate:
686 686 p1, p2 = self.dirstate.parents()
687 687 update_dirstate = True
688 688 else:
689 689 p1, p2 = p1, p2 or nullid
690 690 update_dirstate = (self.dirstate.parents()[0] == p1)
691 691
692 692 c1 = self.changelog.read(p1)
693 693 c2 = self.changelog.read(p2)
694 694 m1 = self.manifest.read(c1[0]).copy()
695 695 m2 = self.manifest.read(c2[0])
696 696
697 697 if use_dirstate:
698 698 branchname = self.workingctx().branch()
699 699 try:
700 700 branchname = branchname.decode('UTF-8').encode('UTF-8')
701 701 except UnicodeDecodeError:
702 702 raise util.Abort(_('branch name not in UTF-8!'))
703 703 else:
704 704 branchname = ""
705 705
706 706 if use_dirstate:
707 707 oldname = c1[5].get("branch") # stored in UTF-8
708 708 if (not commit and not remove and not force and p2 == nullid
709 709 and branchname == oldname):
710 710 self.ui.status(_("nothing changed\n"))
711 711 return None
712 712
713 713 xp1 = hex(p1)
714 714 if p2 == nullid: xp2 = ''
715 715 else: xp2 = hex(p2)
716 716
717 717 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
718 718
719 719 wlock = self.wlock()
720 720 lock = self.lock()
721 721 tr = self.transaction()
722 722 trp = weakref.proxy(tr)
723 723
724 724 # check in files
725 725 new = {}
726 726 linkrev = self.changelog.count()
727 727 commit.sort()
728 728 is_exec = util.execfunc(self.root, m1.execf)
729 729 is_link = util.linkfunc(self.root, m1.linkf)
730 730 for f in commit:
731 731 self.ui.note(f + "\n")
732 732 try:
733 733 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
734 734 new_exec = is_exec(f)
735 735 new_link = is_link(f)
736 736 if not changed or changed[-1] != f:
737 737 # mention the file in the changelog if some
738 738 # flag changed, even if there was no content
739 739 # change.
740 740 old_exec = m1.execf(f)
741 741 old_link = m1.linkf(f)
742 742 if old_exec != new_exec or old_link != new_link:
743 743 changed.append(f)
744 744 m1.set(f, new_exec, new_link)
745 745 except (OSError, IOError):
746 746 if use_dirstate:
747 747 self.ui.warn(_("trouble committing %s!\n") % f)
748 748 raise
749 749 else:
750 750 remove.append(f)
751 751
752 752 # update manifest
753 753 m1.update(new)
754 754 remove.sort()
755 755 removed = []
756 756
757 757 for f in remove:
758 758 if f in m1:
759 759 del m1[f]
760 760 removed.append(f)
761 761 elif f in m2:
762 762 removed.append(f)
763 763 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
764 764 (new, removed))
765 765
766 766 # add changeset
767 767 new = new.keys()
768 768 new.sort()
769 769
770 770 user = user or self.ui.username()
771 771 if not text or force_editor:
772 772 edittext = []
773 773 if text:
774 774 edittext.append(text)
775 775 edittext.append("")
776 776 edittext.append("HG: user: %s" % user)
777 777 if p2 != nullid:
778 778 edittext.append("HG: branch merge")
779 779 if branchname:
780 780 edittext.append("HG: branch %s" % util.tolocal(branchname))
781 781 edittext.extend(["HG: changed %s" % f for f in changed])
782 782 edittext.extend(["HG: removed %s" % f for f in removed])
783 783 if not changed and not remove:
784 784 edittext.append("HG: no files changed")
785 785 edittext.append("")
786 786 # run editor in the repository root
787 787 olddir = os.getcwd()
788 788 os.chdir(self.root)
789 789 text = self.ui.edit("\n".join(edittext), user)
790 790 os.chdir(olddir)
791 791
792 if branchname:
793 extra["branch"] = branchname
794
795 if use_dirstate:
792 796 lines = [line.rstrip() for line in text.rstrip().splitlines()]
793 797 while lines and not lines[0]:
794 798 del lines[0]
795 799 if not lines:
796 800 return None
797 801 text = '\n'.join(lines)
798 if branchname:
799 extra["branch"] = branchname
802
800 803 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
801 804 user, date, extra)
802 805 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
803 806 parent2=xp2)
804 807 tr.close()
805 808
806 809 if self.branchcache and "branch" in extra:
807 810 self.branchcache[util.tolocal(extra["branch"])] = n
808 811
809 812 if use_dirstate or update_dirstate:
810 813 self.dirstate.setparents(n)
811 814 if use_dirstate:
812 815 for f in new:
813 816 self.dirstate.normal(f)
814 817 for f in removed:
815 818 self.dirstate.forget(f)
816 819
817 820 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
818 821 return n
819 822 finally:
820 823 del lock, wlock, tr
821 824
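# Illustrative: on the normal "hg commit" path p1 is None, so use_dirstate
# is True and the message is munged as above; rawcommit() always passes an
# explicit p1, so use_dirstate is False and, with this change, the message
# text is recorded verbatim.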
822 825 def walk(self, node=None, files=[], match=util.always, badmatch=None):
823 826 '''
824 827 walk recursively through the directory tree or a given
825 828 changeset, finding all files matched by the match
826 829 function
827 830
828 831 results are yielded in a tuple (src, filename), where src
829 832 is one of:
830 833 'f' the file was found in the directory tree
831 834 'm' the file was only in the dirstate and not in the tree
832 835 'b' file was not found and matched badmatch
833 836 '''
834 837
835 838 if node:
836 839 fdict = dict.fromkeys(files)
837 840 # for dirstate.walk, files=['.'] means "walk the whole tree".
838 841 # follow that here, too
839 842 fdict.pop('.', None)
840 843 mdict = self.manifest.read(self.changelog.read(node)[0])
841 844 mfiles = mdict.keys()
842 845 mfiles.sort()
843 846 for fn in mfiles:
844 847 for ffn in fdict:
845 848 # match if the file is the exact name or a directory
846 849 if ffn == fn or fn.startswith("%s/" % ffn):
847 850 del fdict[ffn]
848 851 break
849 852 if match(fn):
850 853 yield 'm', fn
851 854 ffiles = fdict.keys()
852 855 ffiles.sort()
853 856 for fn in ffiles:
854 857 if badmatch and badmatch(fn):
855 858 if match(fn):
856 859 yield 'b', fn
857 860 else:
858 861 self.ui.warn(_('%s: No such file in rev %s\n')
859 862 % (self.pathto(fn), short(node)))
860 863 else:
861 864 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
862 865 yield src, fn
863 866
864 867 def status(self, node1=None, node2=None, files=[], match=util.always,
865 868 list_ignored=False, list_clean=False):
866 869 """return status of files between two nodes or node and working directory
867 870
868 871 If node1 is None, use the first dirstate parent instead.
869 872 If node2 is None, compare node1 with working directory.
870 873 """
871 874
872 875 def fcmp(fn, getnode):
873 876 t1 = self.wread(fn)
874 877 return self.file(fn).cmp(getnode(fn), t1)
875 878
876 879 def mfmatches(node):
877 880 change = self.changelog.read(node)
878 881 mf = self.manifest.read(change[0]).copy()
879 882 for fn in mf.keys():
880 883 if not match(fn):
881 884 del mf[fn]
882 885 return mf
883 886
884 887 modified, added, removed, deleted, unknown = [], [], [], [], []
885 888 ignored, clean = [], []
886 889
887 890 compareworking = False
888 891 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
889 892 compareworking = True
890 893
891 894 if not compareworking:
892 895 # read the manifest from node1 before the manifest from node2,
893 896 # so that we'll hit the manifest cache if we're going through
894 897 # all the revisions in parent->child order.
895 898 mf1 = mfmatches(node1)
896 899
897 900 # are we comparing the working directory?
898 901 if not node2:
899 902 (lookup, modified, added, removed, deleted, unknown,
900 903 ignored, clean) = self.dirstate.status(files, match,
901 904 list_ignored, list_clean)
902 905
903 906 # are we comparing working dir against its parent?
904 907 if compareworking:
905 908 if lookup:
906 909 fixup = []
907 910 # do a full compare of any files that might have changed
908 911 ctx = self.changectx()
909 912 for f in lookup:
910 913 if f not in ctx or ctx[f].cmp(self.wread(f)):
911 914 modified.append(f)
912 915 else:
913 916 fixup.append(f)
914 917 if list_clean:
915 918 clean.append(f)
916 919
917 920 # update dirstate for files that are actually clean
918 921 if fixup:
919 922 wlock = None
920 923 try:
921 924 try:
922 925 wlock = self.wlock(False)
923 926 except lock.LockException:
924 927 pass
925 928 if wlock:
926 929 for f in fixup:
927 930 self.dirstate.normal(f)
928 931 finally:
929 932 del wlock
930 933 else:
931 934 # we are comparing working dir against non-parent
932 935 # generate a pseudo-manifest for the working dir
933 936 # XXX: create it in dirstate.py ?
934 937 mf2 = mfmatches(self.dirstate.parents()[0])
935 938 is_exec = util.execfunc(self.root, mf2.execf)
936 939 is_link = util.linkfunc(self.root, mf2.linkf)
937 940 for f in lookup + modified + added:
938 941 mf2[f] = ""
939 942 mf2.set(f, is_exec(f), is_link(f))
940 943 for f in removed:
941 944 if f in mf2:
942 945 del mf2[f]
943 946
944 947 else:
945 948 # we are comparing two revisions
946 949 mf2 = mfmatches(node2)
947 950
948 951 if not compareworking:
949 952 # flush lists from dirstate before comparing manifests
950 953 modified, added, clean = [], [], []
951 954
952 955 # make sure to sort the files so we talk to the disk in a
953 956 # reasonable order
954 957 mf2keys = mf2.keys()
955 958 mf2keys.sort()
956 959 getnode = lambda fn: mf1.get(fn, nullid)
957 960 for fn in mf2keys:
958 961 if mf1.has_key(fn):
959 962 if (mf1.flags(fn) != mf2.flags(fn) or
960 963 (mf1[fn] != mf2[fn] and
961 964 (mf2[fn] != "" or fcmp(fn, getnode)))):
962 965 modified.append(fn)
963 966 elif list_clean:
964 967 clean.append(fn)
965 968 del mf1[fn]
966 969 else:
967 970 added.append(fn)
968 971
969 972 removed = mf1.keys()
970 973
971 974 # sort and return results:
972 975 for l in modified, added, removed, deleted, unknown, ignored, clean:
973 976 l.sort()
974 977 return (modified, added, removed, deleted, unknown, ignored, clean)
975 978
976 979 def add(self, list):
977 980 wlock = self.wlock()
978 981 try:
979 982 for f in list:
980 983 p = self.wjoin(f)
981 984 try:
982 985 st = os.lstat(p)
983 986 except:
984 987 self.ui.warn(_("%s does not exist!\n") % f)
985 988 continue
986 989 if st.st_size > 10000000:
987 990 self.ui.warn(_("%s: files over 10MB may cause memory and"
988 991 " performance problems\n"
989 992 "(use 'hg revert %s' to unadd the file)\n")
990 993 % (f, f))
991 994 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
992 995 self.ui.warn(_("%s not added: only files and symlinks "
993 996 "supported currently\n") % f)
994 997 elif self.dirstate[f] in 'an':
995 998 self.ui.warn(_("%s already tracked!\n") % f)
996 999 else:
997 1000 self.dirstate.add(f)
998 1001 finally:
999 1002 del wlock
1000 1003
1001 1004 def forget(self, list):
1002 1005 wlock = self.wlock()
1003 1006 try:
1004 1007 for f in list:
1005 1008 if self.dirstate[f] != 'a':
1006 1009 self.ui.warn(_("%s not added!\n") % f)
1007 1010 else:
1008 1011 self.dirstate.forget(f)
1009 1012 finally:
1010 1013 del wlock
1011 1014
1012 1015 def remove(self, list, unlink=False):
1013 1016 wlock = None
1014 1017 try:
1015 1018 if unlink:
1016 1019 for f in list:
1017 1020 try:
1018 1021 util.unlink(self.wjoin(f))
1019 1022 except OSError, inst:
1020 1023 if inst.errno != errno.ENOENT:
1021 1024 raise
1022 1025 wlock = self.wlock()
1023 1026 for f in list:
1024 1027 if unlink and os.path.exists(self.wjoin(f)):
1025 1028 self.ui.warn(_("%s still exists!\n") % f)
1026 1029 elif self.dirstate[f] == 'a':
1027 1030 self.dirstate.forget(f)
1028 1031 elif f not in self.dirstate:
1029 1032 self.ui.warn(_("%s not tracked!\n") % f)
1030 1033 else:
1031 1034 self.dirstate.remove(f)
1032 1035 finally:
1033 1036 del wlock
1034 1037
1035 1038 def undelete(self, list):
1036 1039 wlock = None
1037 1040 try:
1038 1041 p = self.dirstate.parents()[0]
1039 1042 mn = self.changelog.read(p)[0]
1040 1043 m = self.manifest.read(mn)
1041 1044 wlock = self.wlock()
1042 1045 for f in list:
1043 1046 if self.dirstate[f] != 'r':
1044 1047 self.ui.warn("%s not removed!\n" % f)
1045 1048 else:
1046 1049 t = self.file(f).read(m[f])
1047 1050 self.wwrite(f, t, m.flags(f))
1048 1051 self.dirstate.normal(f)
1049 1052 finally:
1050 1053 del wlock
1051 1054
1052 1055 def copy(self, source, dest):
1053 1056 wlock = None
1054 1057 try:
1055 1058 p = self.wjoin(dest)
1056 1059 if not (os.path.exists(p) or os.path.islink(p)):
1057 1060 self.ui.warn(_("%s does not exist!\n") % dest)
1058 1061 elif not (os.path.isfile(p) or os.path.islink(p)):
1059 1062 self.ui.warn(_("copy failed: %s is not a file or a "
1060 1063 "symbolic link\n") % dest)
1061 1064 else:
1062 1065 wlock = self.wlock()
1063 1066 if dest not in self.dirstate:
1064 1067 self.dirstate.add(dest)
1065 1068 self.dirstate.copy(source, dest)
1066 1069 finally:
1067 1070 del wlock
1068 1071
1069 1072 def heads(self, start=None):
1070 1073 heads = self.changelog.heads(start)
1071 1074 # sort the output in rev descending order
1072 1075 heads = [(-self.changelog.rev(h), h) for h in heads]
1073 1076 heads.sort()
1074 1077 return [n for (r, n) in heads]
1075 1078
1076 1079 def branchheads(self, branch, start=None):
1077 1080 branches = self.branchtags()
1078 1081 if branch not in branches:
1079 1082 return []
1080 1083 # The basic algorithm is this:
1081 1084 #
1082 1085 # Start from the branch tip since there are no later revisions that can
1083 1086 # possibly be in this branch, and the tip is a guaranteed head.
1084 1087 #
1085 1088 # Remember the tip's parents as the first ancestors, since these by
1086 1089 # definition are not heads.
1087 1090 #
1088 1091 # Step backwards from the branch tip through all the revisions. We are
1089 1092 # guaranteed by the rules of Mercurial that we will now be visiting the
1090 1093 # nodes in reverse topological order (children before parents).
1091 1094 #
1092 1095 # If a revision is one of the ancestors of a head then we can toss it
1093 1096 # out of the ancestors set (we've already found it and won't be
1094 1097 # visiting it again) and put its parents in the ancestors set.
1095 1098 #
1096 1099 # Otherwise, if a revision is in the branch it's another head, since it
1097 1100 # wasn't in the ancestor list of an existing head. So add it to the
1098 1101 # head list, and add its parents to the ancestor list.
1099 1102 #
1100 1103 # If it is not in the branch ignore it.
1101 1104 #
1102 1105 # Once we have a list of heads, use nodesbetween to filter out all the
1103 1106 # heads that cannot be reached from startrev. There may be a more
1104 1107 # efficient way to do this as part of the previous algorithm.
1105 1108
1106 1109 set = util.set
1107 1110 heads = [self.changelog.rev(branches[branch])]
1108 1111 # Don't care if ancestors contains nullrev or not.
1109 1112 ancestors = set(self.changelog.parentrevs(heads[0]))
1110 1113 for rev in xrange(heads[0] - 1, nullrev, -1):
1111 1114 if rev in ancestors:
1112 1115 ancestors.update(self.changelog.parentrevs(rev))
1113 1116 ancestors.remove(rev)
1114 1117 elif self.changectx(rev).branch() == branch:
1115 1118 heads.append(rev)
1116 1119 ancestors.update(self.changelog.parentrevs(rev))
1117 1120 heads = [self.changelog.node(rev) for rev in heads]
1118 1121 if start is not None:
1119 1122 heads = self.changelog.nodesbetween([start], heads)[2]
1120 1123 return heads
1121 1124
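# Illustrative: if revs 1 and 2 are both heads of branch "default" with
# common parent rev 0, the walk starts at the branch tip (say rev 2) with
# ancestors = {0}; rev 1 is not in ancestors but is on the branch, so it
# becomes a second head, while rev 0 is discarded as a known ancestor.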
1122 1125 def branches(self, nodes):
1123 1126 if not nodes:
1124 1127 nodes = [self.changelog.tip()]
1125 1128 b = []
1126 1129 for n in nodes:
1127 1130 t = n
1128 1131 while 1:
1129 1132 p = self.changelog.parents(n)
1130 1133 if p[1] != nullid or p[0] == nullid:
1131 1134 b.append((t, n, p[0], p[1]))
1132 1135 break
1133 1136 n = p[0]
1134 1137 return b
1135 1138
1136 1139 def between(self, pairs):
1137 1140 r = []
1138 1141
1139 1142 for top, bottom in pairs:
1140 1143 n, l, i = top, [], 0
1141 1144 f = 1
1142 1145
1143 1146 while n != bottom:
1144 1147 p = self.changelog.parents(n)[0]
1145 1148 if i == f:
1146 1149 l.append(n)
1147 1150 f = f * 2
1148 1151 n = p
1149 1152 i += 1
1150 1153
1151 1154 r.append(l)
1152 1155
1153 1156 return r
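# Illustrative: walking first parents from top towards bottom, the loop
# above records the nodes at distances 1, 2, 4, 8, ... from top. These
# logarithmically spaced samples are what findincoming's binary search
# below narrows to find where the histories diverge.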
1154 1157
1155 1158 def findincoming(self, remote, base=None, heads=None, force=False):
1156 1159 """Return list of roots of the subsets of missing nodes from remote
1157 1160
1158 1161 If base dict is specified, assume that these nodes and their parents
1159 1162 exist on the remote side and that no child of a node of base exists
1160 1163 in both remote and self.
1161 1164 Furthermore base will be updated to include the nodes that exist
1162 1165 in self and remote but whose children do not exist in both self and remote.
1163 1166 If a list of heads is specified, return only nodes which are heads
1164 1167 or ancestors of these heads.
1165 1168
1166 1169 All the ancestors of base are in self and in remote.
1167 1170 All the descendants of the list returned are missing in self.
1168 1171 (and so we know that the rest of the nodes are missing in remote, see
1169 1172 outgoing)
1170 1173 """
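# Illustrative: if self has changesets 0-1-2 and remote also has 3-4 on
# top of 2, findincoming returns [node 3] (the root of the missing subset)
# and, when a base dict was passed in, node 2 ends up recorded in base.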
1171 1174 m = self.changelog.nodemap
1172 1175 search = []
1173 1176 fetch = {}
1174 1177 seen = {}
1175 1178 seenbranch = {}
1176 1179 if base == None:
1177 1180 base = {}
1178 1181
1179 1182 if not heads:
1180 1183 heads = remote.heads()
1181 1184
1182 1185 if self.changelog.tip() == nullid:
1183 1186 base[nullid] = 1
1184 1187 if heads != [nullid]:
1185 1188 return [nullid]
1186 1189 return []
1187 1190
1188 1191 # assume we're closer to the tip than the root
1189 1192 # and start by examining the heads
1190 1193 self.ui.status(_("searching for changes\n"))
1191 1194
1192 1195 unknown = []
1193 1196 for h in heads:
1194 1197 if h not in m:
1195 1198 unknown.append(h)
1196 1199 else:
1197 1200 base[h] = 1
1198 1201
1199 1202 if not unknown:
1200 1203 return []
1201 1204
1202 1205 req = dict.fromkeys(unknown)
1203 1206 reqcnt = 0
1204 1207
1205 1208 # search through remote branches
1206 1209 # a 'branch' here is a linear segment of history, with four parts:
1207 1210 # head, root, first parent, second parent
1208 1211 # (a branch always has two parents (or none) by definition)
1209 1212 unknown = remote.branches(unknown)
1210 1213 while unknown:
1211 1214 r = []
1212 1215 while unknown:
1213 1216 n = unknown.pop(0)
1214 1217 if n[0] in seen:
1215 1218 continue
1216 1219
1217 1220 self.ui.debug(_("examining %s:%s\n")
1218 1221 % (short(n[0]), short(n[1])))
1219 1222 if n[0] == nullid: # found the end of the branch
1220 1223 pass
1221 1224 elif n in seenbranch:
1222 1225 self.ui.debug(_("branch already found\n"))
1223 1226 continue
1224 1227 elif n[1] and n[1] in m: # do we know the base?
1225 1228 self.ui.debug(_("found incomplete branch %s:%s\n")
1226 1229 % (short(n[0]), short(n[1])))
1227 1230 search.append(n) # schedule branch range for scanning
1228 1231 seenbranch[n] = 1
1229 1232 else:
1230 1233 if n[1] not in seen and n[1] not in fetch:
1231 1234 if n[2] in m and n[3] in m:
1232 1235 self.ui.debug(_("found new changeset %s\n") %
1233 1236 short(n[1]))
1234 1237 fetch[n[1]] = 1 # earliest unknown
1235 1238 for p in n[2:4]:
1236 1239 if p in m:
1237 1240 base[p] = 1 # latest known
1238 1241
1239 1242 for p in n[2:4]:
1240 1243 if p not in req and p not in m:
1241 1244 r.append(p)
1242 1245 req[p] = 1
1243 1246 seen[n[0]] = 1
1244 1247
1245 1248 if r:
1246 1249 reqcnt += 1
1247 1250 self.ui.debug(_("request %d: %s\n") %
1248 1251 (reqcnt, " ".join(map(short, r))))
1249 1252 for p in xrange(0, len(r), 10):
1250 1253 for b in remote.branches(r[p:p+10]):
1251 1254 self.ui.debug(_("received %s:%s\n") %
1252 1255 (short(b[0]), short(b[1])))
1253 1256 unknown.append(b)
1254 1257
1255 1258 # do binary search on the branches we found
1256 1259 while search:
1257 1260 n = search.pop(0)
1258 1261 reqcnt += 1
1259 1262 l = remote.between([(n[0], n[1])])[0]
1260 1263 l.append(n[1])
1261 1264 p = n[0]
1262 1265 f = 1
1263 1266 for i in l:
1264 1267 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1265 1268 if i in m:
1266 1269 if f <= 2:
1267 1270 self.ui.debug(_("found new branch changeset %s\n") %
1268 1271 short(p))
1269 1272 fetch[p] = 1
1270 1273 base[i] = 1
1271 1274 else:
1272 1275 self.ui.debug(_("narrowed branch search to %s:%s\n")
1273 1276 % (short(p), short(i)))
1274 1277 search.append((p, i))
1275 1278 break
1276 1279 p, f = i, f * 2
1277 1280
1278 1281 # sanity check our fetch list
1279 1282 for f in fetch.keys():
1280 1283 if f in m:
1281 1284 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1282 1285
1283 1286 if base.keys() == [nullid]:
1284 1287 if force:
1285 1288 self.ui.warn(_("warning: repository is unrelated\n"))
1286 1289 else:
1287 1290 raise util.Abort(_("repository is unrelated"))
1288 1291
1289 1292 self.ui.debug(_("found new changesets starting at ") +
1290 1293 " ".join([short(f) for f in fetch]) + "\n")
1291 1294
1292 1295 self.ui.debug(_("%d total queries\n") % reqcnt)
1293 1296
1294 1297 return fetch.keys()
1295 1298
1296 1299 def findoutgoing(self, remote, base=None, heads=None, force=False):
1297 1300 """Return list of nodes that are roots of subsets not in remote
1298 1301
1299 1302 If base dict is specified, assume that these nodes and their parents
1300 1303 exist on the remote side.
1301 1304 If a list of heads is specified, return only nodes which are heads
1302 1305 or ancestors of these heads, and return a second element which
1303 1306 contains all remote heads which get new children.
1304 1307 """
1305 1308 if base == None:
1306 1309 base = {}
1307 1310 self.findincoming(remote, base, heads, force=force)
1308 1311
1309 1312 self.ui.debug(_("common changesets up to ")
1310 1313 + " ".join(map(short, base.keys())) + "\n")
1311 1314
1312 1315 remain = dict.fromkeys(self.changelog.nodemap)
1313 1316
1314 1317 # prune everything remote has from the tree
1315 1318 del remain[nullid]
1316 1319 remove = base.keys()
1317 1320 while remove:
1318 1321 n = remove.pop(0)
1319 1322 if n in remain:
1320 1323 del remain[n]
1321 1324 for p in self.changelog.parents(n):
1322 1325 remove.append(p)
1323 1326
1324 1327 # find every node whose parents have been pruned
1325 1328 subset = []
1326 1329 # find every remote head that will get new children
1327 1330 updated_heads = {}
1328 1331 for n in remain:
1329 1332 p1, p2 = self.changelog.parents(n)
1330 1333 if p1 not in remain and p2 not in remain:
1331 1334 subset.append(n)
1332 1335 if heads:
1333 1336 if p1 in heads:
1334 1337 updated_heads[p1] = True
1335 1338 if p2 in heads:
1336 1339 updated_heads[p2] = True
1337 1340
1338 1341 # this is the set of all roots we have to push
1339 1342 if heads:
1340 1343 return subset, updated_heads.keys()
1341 1344 else:
1342 1345 return subset
1343 1346
1344 1347 def pull(self, remote, heads=None, force=False):
1345 1348 lock = self.lock()
1346 1349 try:
1347 1350 fetch = self.findincoming(remote, force=force)
1348 1351 if fetch == [nullid]:
1349 1352 self.ui.status(_("requesting all changes\n"))
1350 1353
1351 1354 if not fetch:
1352 1355 self.ui.status(_("no changes found\n"))
1353 1356 return 0
1354 1357
1355 1358 if heads is None:
1356 1359 cg = remote.changegroup(fetch, 'pull')
1357 1360 else:
1358 1361 if 'changegroupsubset' not in remote.capabilities:
1359 1362 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1360 1363 cg = remote.changegroupsubset(fetch, heads, 'pull')
1361 1364 return self.addchangegroup(cg, 'pull', remote.url())
1362 1365 finally:
1363 1366 del lock
1364 1367
1365 1368 def push(self, remote, force=False, revs=None):
1366 1369 # there are two ways to push to remote repo:
1367 1370 #
1368 1371 # addchangegroup assumes local user can lock remote
1369 1372 # repo (local filesystem, old ssh servers).
1370 1373 #
1371 1374 # unbundle assumes local user cannot lock remote repo (new ssh
1372 1375 # servers, http servers).
1373 1376
1374 1377 if remote.capable('unbundle'):
1375 1378 return self.push_unbundle(remote, force, revs)
1376 1379 return self.push_addchangegroup(remote, force, revs)
1377 1380
1378 1381 def prepush(self, remote, force, revs):
1379 1382 base = {}
1380 1383 remote_heads = remote.heads()
1381 1384 inc = self.findincoming(remote, base, remote_heads, force=force)
1382 1385
1383 1386 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1384 1387 if revs is not None:
1385 1388 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1386 1389 else:
1387 1390 bases, heads = update, self.changelog.heads()
1388 1391
1389 1392 if not bases:
1390 1393 self.ui.status(_("no changes found\n"))
1391 1394 return None, 1
1392 1395 elif not force:
1393 1396 # check if we're creating new remote heads
1394 1397 # to be a remote head after push, node must be either
1395 1398 # - unknown locally
1396 1399 # - a local outgoing head descended from update
1397 1400 # - a remote head that's known locally and not
1398 1401 # ancestral to an outgoing head
1399 1402
1400 1403 warn = 0
1401 1404
1402 1405 if remote_heads == [nullid]:
1403 1406 warn = 0
1404 1407 elif not revs and len(heads) > len(remote_heads):
1405 1408 warn = 1
1406 1409 else:
1407 1410 newheads = list(heads)
1408 1411 for r in remote_heads:
1409 1412 if r in self.changelog.nodemap:
1410 1413 desc = self.changelog.heads(r, heads)
1411 1414 l = [h for h in heads if h in desc]
1412 1415 if not l:
1413 1416 newheads.append(r)
1414 1417 else:
1415 1418 newheads.append(r)
1416 1419 if len(newheads) > len(remote_heads):
1417 1420 warn = 1
1418 1421
1419 1422 if warn:
1420 1423 self.ui.warn(_("abort: push creates new remote branches!\n"))
1421 1424 self.ui.status(_("(did you forget to merge?"
1422 1425 " use push -f to force)\n"))
1423 1426 return None, 1
1424 1427 elif inc:
1425 1428 self.ui.warn(_("note: unsynced remote changes!\n"))
1426 1429
1427 1430
1428 1431 if revs is None:
1429 1432 cg = self.changegroup(update, 'push')
1430 1433 else:
1431 1434 cg = self.changegroupsubset(update, revs, 'push')
1432 1435 return cg, remote_heads
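# Illustrative: pushing a second head for a branch the remote already has
# leaves len(newheads) > len(remote_heads) above, so prepush prints
# "abort: push creates new remote branches!" and bails out unless force
# was given.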
1433 1436
1434 1437 def push_addchangegroup(self, remote, force, revs):
1435 1438 lock = remote.lock()
1436 1439 try:
1437 1440 ret = self.prepush(remote, force, revs)
1438 1441 if ret[0] is not None:
1439 1442 cg, remote_heads = ret
1440 1443 return remote.addchangegroup(cg, 'push', self.url())
1441 1444 return ret[1]
1442 1445 finally:
1443 1446 del lock
1444 1447
1445 1448 def push_unbundle(self, remote, force, revs):
1446 1449 # local repo finds heads on server, finds out what revs it
1447 1450 # must push. once revs transferred, if server finds it has
1448 1451 # different heads (someone else won commit/push race), server
1449 1452 # aborts.
1450 1453
1451 1454 ret = self.prepush(remote, force, revs)
1452 1455 if ret[0] is not None:
1453 1456 cg, remote_heads = ret
1454 1457 if force: remote_heads = ['force']
1455 1458 return remote.unbundle(cg, remote_heads, 'push')
1456 1459 return ret[1]
1457 1460
1458 1461 def changegroupinfo(self, nodes):
1459 1462 self.ui.note(_("%d changesets found\n") % len(nodes))
1460 1463 if self.ui.debugflag:
1461 1464 self.ui.debug(_("List of changesets:\n"))
1462 1465 for node in nodes:
1463 1466 self.ui.debug("%s\n" % hex(node))
1464 1467
1465 1468 def changegroupsubset(self, bases, heads, source):
1466 1469 """This function generates a changegroup consisting of all the nodes
1467 1470 that are descendents of any of the bases, and ancestors of any of
1468 1471 the heads.
1469 1472
1470 1473 It is fairly complex as determining which filenodes and which
1471 1474 manifest nodes need to be included for the changeset to be complete
1472 1475 is non-trivial.
1473 1476
1474 1477 Another wrinkle is doing the reverse, figuring out which changeset in
1475 1478 the changegroup a particular filenode or manifestnode belongs to."""
1476 1479
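# Illustrative: changegroupsubset([B], [H]) bundles every changeset that
# descends from a base B and is an ancestor of a head H, then computes
# which manifest nodes and filenodes those changesets need that the
# recipient (assumed to know B and its ancestors) is missing.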
1477 1480 self.hook('preoutgoing', throw=True, source=source)
1478 1481
1479 1482 # Set up some initial variables
1480 1483 # Make it easy to refer to self.changelog
1481 1484 cl = self.changelog
1482 1485 # msng is short for missing - compute the list of changesets in this
1483 1486 # changegroup.
1484 1487 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1485 1488 self.changegroupinfo(msng_cl_lst)
1486 1489 # Some bases may turn out to be superfluous, and some heads may be
1487 1490 # too. nodesbetween will return the minimal set of bases and heads
1488 1491 # necessary to re-create the changegroup.
1489 1492
1490 1493 # Known heads are the list of heads that it is assumed the recipient
1491 1494 # of this changegroup will know about.
1492 1495 knownheads = {}
1493 1496 # We assume that all parents of bases are known heads.
1494 1497 for n in bases:
1495 1498 for p in cl.parents(n):
1496 1499 if p != nullid:
1497 1500 knownheads[p] = 1
1498 1501 knownheads = knownheads.keys()
1499 1502 if knownheads:
1500 1503 # Now that we know what heads are known, we can compute which
1501 1504 # changesets are known. The recipient must know about all
1502 1505 # changesets required to reach the known heads from the null
1503 1506 # changeset.
1504 1507 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1505 1508 junk = None
1506 1509 # Transform the list into an ersatz set.
1507 1510 has_cl_set = dict.fromkeys(has_cl_set)
1508 1511 else:
1509 1512 # If there were no known heads, the recipient cannot be assumed to
1510 1513 # know about any changesets.
1511 1514 has_cl_set = {}
1512 1515
1513 1516 # Make it easy to refer to self.manifest
1514 1517 mnfst = self.manifest
1515 1518 # We don't know which manifests are missing yet
1516 1519 msng_mnfst_set = {}
1517 1520 # Nor do we know which filenodes are missing.
1518 1521 msng_filenode_set = {}
1519 1522
1520 1523 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1521 1524 junk = None
1522 1525
1523 1526 # A changeset always belongs to itself, so the changenode lookup
1524 1527 # function for a changenode is identity.
1525 1528 def identity(x):
1526 1529 return x
1527 1530
1528 1531 # A function generating function. Sets up an environment for the
1529 1532 # inner function.
1530 1533 def cmp_by_rev_func(revlog):
1531 1534 # Compare two nodes by their revision number in the environment's
1532 1535 # revision history. Since the revision number both represents the
1533 1536 # most efficient order to read the nodes in, and represents a
1534 1537 # topological sorting of the nodes, this function is often useful.
1535 1538 def cmp_by_rev(a, b):
1536 1539 return cmp(revlog.rev(a), revlog.rev(b))
1537 1540 return cmp_by_rev
1538 1541
1539 1542 # If we determine that a particular file or manifest node must be a
1540 1543 # node that the recipient of the changegroup will already have, we can
1541 1544 # also assume the recipient will have all the parents. This function
1542 1545 # prunes them from the set of missing nodes.
1543 1546 def prune_parents(revlog, hasset, msngset):
1544 1547 haslst = hasset.keys()
1545 1548 haslst.sort(cmp_by_rev_func(revlog))
1546 1549 for node in haslst:
1547 1550 parentlst = [p for p in revlog.parents(node) if p != nullid]
1548 1551 while parentlst:
1549 1552 n = parentlst.pop()
1550 1553 if n not in hasset:
1551 1554 hasset[n] = 1
1552 1555 p = [p for p in revlog.parents(n) if p != nullid]
1553 1556 parentlst.extend(p)
1554 1557 for n in hasset:
1555 1558 msngset.pop(n, None)
1556 1559
1557 1560 # This is a function generating function used to set up an environment
1558 1561 # for the inner function to execute in.
1559 1562 def manifest_and_file_collector(changedfileset):
1560 1563 # This is an information gathering function that gathers
1561 1564 # information from each changeset node that goes out as part of
1562 1565 # the changegroup. The information gathered is a list of which
1563 1566 # manifest nodes are potentially required (the recipient may
1564 1567 # already have them) and total list of all files which were
1565 1568 # changed in any changeset in the changegroup.
1566 1569 #
1567 1570 # We also remember the first changenode we saw any manifest
1568 1571 # referenced by so we can later determine which changenode 'owns'
1569 1572 # the manifest.
1570 1573 def collect_manifests_and_files(clnode):
1571 1574 c = cl.read(clnode)
1572 1575 for f in c[3]:
1573 1576 # This is to make sure we only have one instance of each
1574 1577 # filename string for each filename.
1575 1578 changedfileset.setdefault(f, f)
1576 1579 msng_mnfst_set.setdefault(c[0], clnode)
1577 1580 return collect_manifests_and_files
1578 1581
1579 1582 # Figure out which manifest nodes (of the ones we think might be part
1580 1583 # of the changegroup) the recipient must know about and remove them
1581 1584 # from the changegroup.
1582 1585 def prune_manifests():
1583 1586 has_mnfst_set = {}
1584 1587 for n in msng_mnfst_set:
1585 1588 # If a 'missing' manifest thinks it belongs to a changenode
1586 1589 # the recipient is assumed to have, obviously the recipient
1587 1590 # must have that manifest.
1588 1591 linknode = cl.node(mnfst.linkrev(n))
1589 1592 if linknode in has_cl_set:
1590 1593 has_mnfst_set[n] = 1
1591 1594 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1592 1595
1593 1596 # Use the information collected in collect_manifests_and_files to say
1594 1597 # which changenode any manifestnode belongs to.
1595 1598 def lookup_manifest_link(mnfstnode):
1596 1599 return msng_mnfst_set[mnfstnode]
1597 1600
1598 1601 # A function generating function that sets up the initial environment
1599 1602 # for the inner function.
1600 1603 def filenode_collector(changedfiles):
1601 1604 next_rev = [0]
1602 1605 # This gathers information from each manifestnode included in the
1603 1606 # changegroup about which filenodes the manifest node references
1604 1607 # so we can include those in the changegroup too.
1605 1608 #
1606 1609 # It also remembers which changenode each filenode belongs to. It
1607 1610 # does this by assuming that a filenode belongs to the changenode
1608 1611 # the first manifest that references it belongs to.
1609 1612 def collect_msng_filenodes(mnfstnode):
1610 1613 r = mnfst.rev(mnfstnode)
1611 1614 if r == next_rev[0]:
1612 1615 # If the last rev we looked at was the one just previous,
1613 1616 # we only need to see a diff.
1614 1617 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1615 1618 # For each line in the delta
1616 1619 for dline in delta.splitlines():
1617 1620 # get the filename and filenode for that line
1618 1621 f, fnode = dline.split('\0')
1619 1622 fnode = bin(fnode[:40])
1620 1623 f = changedfiles.get(f, None)
1621 1624 # And if the file is in the list of files we care
1622 1625 # about.
1623 1626 if f is not None:
1624 1627 # Get the changenode this manifest belongs to
1625 1628 clnode = msng_mnfst_set[mnfstnode]
1626 1629 # Create the set of filenodes for the file if
1627 1630 # there isn't one already.
1628 1631 ndset = msng_filenode_set.setdefault(f, {})
1629 1632 # And set the filenode's changelog node to the
1630 1633 # manifest's if it hasn't been set already.
1631 1634 ndset.setdefault(fnode, clnode)
1632 1635 else:
1633 1636 # Otherwise we need a full manifest.
1634 1637 m = mnfst.read(mnfstnode)
1635 1638                     # For every file we care about.
1636 1639 for f in changedfiles:
1637 1640 fnode = m.get(f, None)
1638 1641 # If it's in the manifest
1639 1642 if fnode is not None:
1640 1643 # See comments above.
1641 1644 clnode = msng_mnfst_set[mnfstnode]
1642 1645 ndset = msng_filenode_set.setdefault(f, {})
1643 1646 ndset.setdefault(fnode, clnode)
1644 1647 # Remember the revision we hope to see next.
1645 1648 next_rev[0] = r + 1
1646 1649 return collect_msng_filenodes
1647 1650
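# [editor's note] Each manifest line -- and hence each line of a
# manifest delta -- has the form "<file>\0<40 hex digits><flags>",
# which is why collect_msng_filenodes can split on NUL and take the
# first 40 characters as the filenode.  A sketch with made-up data:
#
#     dline = 'src/foo.c\x001234567890abcdef1234567890abcdef12345678'
#     f, fnode = dline.split('\0')
#     node = fnode[:40]    # bin(node) in the code above
#     # f == 'src/foo.c'; any trailing 'l'/'x' flag chars are dropped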
1648 1651         # We have a list of filenodes we think we need for a file; let's
1649 1652         # remove all those we know the recipient must have.
1650 1653 def prune_filenodes(f, filerevlog):
1651 1654 msngset = msng_filenode_set[f]
1652 1655 hasset = {}
1653 1656 # If a 'missing' filenode thinks it belongs to a changenode we
1654 1657 # assume the recipient must have, then the recipient must have
1655 1658 # that filenode.
1656 1659 for n in msngset:
1657 1660 clnode = cl.node(filerevlog.linkrev(n))
1658 1661 if clnode in has_cl_set:
1659 1662 hasset[n] = 1
1660 1663 prune_parents(filerevlog, hasset, msngset)
1661 1664
1662 1665         # A function generating function that sets up a context for the
1663 1666         # inner function.
1664 1667 def lookup_filenode_link_func(fname):
1665 1668 msngset = msng_filenode_set[fname]
1666 1669 # Lookup the changenode the filenode belongs to.
1667 1670 def lookup_filenode_link(fnode):
1668 1671 return msngset[fnode]
1669 1672 return lookup_filenode_link
1670 1673
1671 1674         # Now that we have all these utility functions to help out and
1672 1675 # logically divide up the task, generate the group.
1673 1676 def gengroup():
1674 1677 # The set of changed files starts empty.
1675 1678 changedfiles = {}
1676 1679 # Create a changenode group generator that will call our functions
1677 1680 # back to lookup the owning changenode and collect information.
1678 1681 group = cl.group(msng_cl_lst, identity,
1679 1682 manifest_and_file_collector(changedfiles))
1680 1683 for chnk in group:
1681 1684 yield chnk
1682 1685
1683 1686 # The list of manifests has been collected by the generator
1684 1687 # calling our functions back.
1685 1688 prune_manifests()
1686 1689 msng_mnfst_lst = msng_mnfst_set.keys()
1687 1690 # Sort the manifestnodes by revision number.
1688 1691 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1689 1692 # Create a generator for the manifestnodes that calls our lookup
1690 1693 # and data collection functions back.
1691 1694 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1692 1695 filenode_collector(changedfiles))
1693 1696 for chnk in group:
1694 1697 yield chnk
1695 1698
1696 1699 # These are no longer needed, dereference and toss the memory for
1697 1700 # them.
1698 1701 msng_mnfst_lst = None
1699 1702 msng_mnfst_set.clear()
1700 1703
1701 1704 changedfiles = changedfiles.keys()
1702 1705 changedfiles.sort()
1703 1706 # Go through all our files in order sorted by name.
1704 1707 for fname in changedfiles:
1705 1708 filerevlog = self.file(fname)
1706 1709 # Toss out the filenodes that the recipient isn't really
1707 1710 # missing.
1708 1711 if msng_filenode_set.has_key(fname):
1709 1712 prune_filenodes(fname, filerevlog)
1710 1713 msng_filenode_lst = msng_filenode_set[fname].keys()
1711 1714 else:
1712 1715 msng_filenode_lst = []
1713 1716 # If any filenodes are left, generate the group for them,
1714 1717 # otherwise don't bother.
1715 1718 if len(msng_filenode_lst) > 0:
1716 1719 yield changegroup.genchunk(fname)
1717 1720 # Sort the filenodes by their revision #
1718 1721 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1719 1722 # Create a group generator and only pass in a changenode
1720 1723 # lookup function as we need to collect no information
1721 1724 # from filenodes.
1722 1725 group = filerevlog.group(msng_filenode_lst,
1723 1726 lookup_filenode_link_func(fname))
1724 1727 for chnk in group:
1725 1728 yield chnk
1726 1729 if msng_filenode_set.has_key(fname):
1727 1730 # Don't need this anymore, toss it to free memory.
1728 1731 del msng_filenode_set[fname]
1729 1732 # Signal that no more groups are left.
1730 1733 yield changegroup.closechunk()
1731 1734
1732 1735 if msng_cl_lst:
1733 1736 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1734 1737
1735 1738 return util.chunkbuffer(gengroup())
1736 1739
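# [editor's note] Schematically, the stream produced by gengroup() is
#
#     changelog group | manifest group | (filename, filelog group)* | 0
#
# where every chunk is a 4-byte big-endian length (counting the length
# field itself) followed by the payload, and a zero-length chunk ends a
# group.  A minimal reader for that framing might look like this
# (sketch, assuming a file-like object fp):
#
#     import struct
#     def readchunk(fp):
#         d = fp.read(4)
#         if not d:
#             return ""
#         l = struct.unpack(">l", d)[0]
#         if l <= 4:
#             return ""      # empty chunk: end of group
#         return fp.read(l - 4)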
1737 1740 def changegroup(self, basenodes, source):
1738 1741 """Generate a changegroup of all nodes that we have that a recipient
1739 1742 doesn't.
1740 1743
1741 1744 This is much easier than the previous function as we can assume that
1742 1745 the recipient has any changenode we aren't sending them."""
1743 1746
1744 1747 self.hook('preoutgoing', throw=True, source=source)
1745 1748
1746 1749 cl = self.changelog
1747 1750 nodes = cl.nodesbetween(basenodes, None)[0]
1748 1751 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1749 1752 self.changegroupinfo(nodes)
1750 1753
1751 1754 def identity(x):
1752 1755 return x
1753 1756
1754 1757 def gennodelst(revlog):
1755 1758 for r in xrange(0, revlog.count()):
1756 1759 n = revlog.node(r)
1757 1760 if revlog.linkrev(n) in revset:
1758 1761 yield n
1759 1762
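# [editor's note] gennodelst() keeps only the nodes introduced by an
# outgoing changeset: a node is wanted when its linkrev (the changelog
# revision that introduced it) is in revset.  Equivalently, for a
# hypothetical revlog:
#
#     wanted = [revlog.node(r) for r in xrange(revlog.count())
#               if revlog.linkrev(revlog.node(r)) in revset]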
1760 1763 def changed_file_collector(changedfileset):
1761 1764 def collect_changed_files(clnode):
1762 1765 c = cl.read(clnode)
1763 1766 for fname in c[3]:
1764 1767 changedfileset[fname] = 1
1765 1768 return collect_changed_files
1766 1769
1767 1770 def lookuprevlink_func(revlog):
1768 1771 def lookuprevlink(n):
1769 1772 return cl.node(revlog.linkrev(n))
1770 1773 return lookuprevlink
1771 1774
1772 1775 def gengroup():
1773 1776 # construct a list of all changed files
1774 1777 changedfiles = {}
1775 1778
1776 1779 for chnk in cl.group(nodes, identity,
1777 1780 changed_file_collector(changedfiles)):
1778 1781 yield chnk
1779 1782 changedfiles = changedfiles.keys()
1780 1783 changedfiles.sort()
1781 1784
1782 1785 mnfst = self.manifest
1783 1786 nodeiter = gennodelst(mnfst)
1784 1787 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1785 1788 yield chnk
1786 1789
1787 1790 for fname in changedfiles:
1788 1791 filerevlog = self.file(fname)
1789 1792 nodeiter = gennodelst(filerevlog)
1790 1793 nodeiter = list(nodeiter)
1791 1794 if nodeiter:
1792 1795 yield changegroup.genchunk(fname)
1793 1796 lookup = lookuprevlink_func(filerevlog)
1794 1797 for chnk in filerevlog.group(nodeiter, lookup):
1795 1798 yield chnk
1796 1799
1797 1800 yield changegroup.closechunk()
1798 1801
1799 1802 if nodes:
1800 1803 self.hook('outgoing', node=hex(nodes[0]), source=source)
1801 1804
1802 1805 return util.chunkbuffer(gengroup())
1803 1806
1804 1807 def addchangegroup(self, source, srctype, url):
1805 1808 """add changegroup to repo.
1806 1809
1807 1810 return values:
1808 1811 - nothing changed or no source: 0
1809 1812 - more heads than before: 1+added heads (2..n)
1810 1813         - fewer heads than before: -1-removed heads (-2..-n)
1811 1814 - number of heads stays the same: 1
1812 1815 """
1813 1816 def csmap(x):
1814 1817 self.ui.debug(_("add changeset %s\n") % short(x))
1815 1818 return cl.count()
1816 1819
1817 1820 def revmap(x):
1818 1821 return cl.rev(x)
1819 1822
1820 1823 if not source:
1821 1824 return 0
1822 1825
1823 1826 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1824 1827
1825 1828 changesets = files = revisions = 0
1826 1829
1827 1830 # write changelog data to temp files so concurrent readers will not see
1828 1831         # an inconsistent view
1829 1832 cl = self.changelog
1830 1833 cl.delayupdate()
1831 1834 oldheads = len(cl.heads())
1832 1835
1833 1836 tr = self.transaction()
1834 1837 try:
1835 1838 trp = weakref.proxy(tr)
1836 1839 # pull off the changeset group
1837 1840 self.ui.status(_("adding changesets\n"))
1838 1841 cor = cl.count() - 1
1839 1842 chunkiter = changegroup.chunkiter(source)
1840 1843 if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1841 1844 raise util.Abort(_("received changelog group is empty"))
1842 1845 cnr = cl.count() - 1
1843 1846 changesets = cnr - cor
1844 1847
1845 1848 # pull off the manifest group
1846 1849 self.ui.status(_("adding manifests\n"))
1847 1850 chunkiter = changegroup.chunkiter(source)
1848 1851 # no need to check for empty manifest group here:
1849 1852 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1850 1853 # no new manifest will be created and the manifest group will
1851 1854 # be empty during the pull
1852 1855 self.manifest.addgroup(chunkiter, revmap, trp)
1853 1856
1854 1857 # process the files
1855 1858 self.ui.status(_("adding file changes\n"))
1856 1859 while 1:
1857 1860 f = changegroup.getchunk(source)
1858 1861 if not f:
1859 1862 break
1860 1863 self.ui.debug(_("adding %s revisions\n") % f)
1861 1864 fl = self.file(f)
1862 1865 o = fl.count()
1863 1866 chunkiter = changegroup.chunkiter(source)
1864 1867 if fl.addgroup(chunkiter, revmap, trp) is None:
1865 1868 raise util.Abort(_("received file revlog group is empty"))
1866 1869 revisions += fl.count() - o
1867 1870 files += 1
1868 1871
1869 1872 # make changelog see real files again
1870 1873 cl.finalize(trp)
1871 1874
1872 1875 newheads = len(self.changelog.heads())
1873 1876 heads = ""
1874 1877 if oldheads and newheads != oldheads:
1875 1878 heads = _(" (%+d heads)") % (newheads - oldheads)
1876 1879
1877 1880 self.ui.status(_("added %d changesets"
1878 1881 " with %d changes to %d files%s\n")
1879 1882 % (changesets, revisions, files, heads))
1880 1883
1881 1884 if changesets > 0:
1882 1885 self.hook('pretxnchangegroup', throw=True,
1883 1886 node=hex(self.changelog.node(cor+1)), source=srctype,
1884 1887 url=url)
1885 1888
1886 1889 tr.close()
1887 1890 finally:
1888 1891 del tr
1889 1892
1890 1893 if changesets > 0:
1891 1894 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1892 1895 source=srctype, url=url)
1893 1896
1894 1897 for i in xrange(cor + 1, cnr + 1):
1895 1898 self.hook("incoming", node=hex(self.changelog.node(i)),
1896 1899 source=srctype, url=url)
1897 1900
1898 1901 # never return 0 here:
1899 1902 if newheads < oldheads:
1900 1903 return newheads - oldheads - 1
1901 1904 else:
1902 1905 return newheads - oldheads + 1
1903 1906
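# [editor's note] The return-value convention documented in the
# docstring above, as a stand-alone hypothetical helper -- the result
# is never 0 once something was added, so callers can distinguish
# "nothing happened" from "heads unchanged":
#
#     def headsdelta(oldheads, newheads):
#         if newheads < oldheads:
#             return newheads - oldheads - 1   # -2..-n: heads removed
#         return newheads - oldheads + 1       # 1: unchanged, 2..n: added
#
#     # headsdelta(1, 1) == 1; headsdelta(1, 3) == 3; headsdelta(3, 1) == -3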
1904 1907
1905 1908 def stream_in(self, remote):
1906 1909 fp = remote.stream_out()
1907 1910 l = fp.readline()
1908 1911 try:
1909 1912 resp = int(l)
1910 1913 except ValueError:
1911 1914 raise util.UnexpectedOutput(
1912 1915 _('Unexpected response from remote server:'), l)
1913 1916 if resp == 1:
1914 1917 raise util.Abort(_('operation forbidden by server'))
1915 1918 elif resp == 2:
1916 1919 raise util.Abort(_('locking the remote repository failed'))
1917 1920 elif resp != 0:
1918 1921 raise util.Abort(_('the server sent an unknown error code'))
1919 1922 self.ui.status(_('streaming all changes\n'))
1920 1923 l = fp.readline()
1921 1924 try:
1922 1925 total_files, total_bytes = map(int, l.split(' ', 1))
1923 1926             except (ValueError, TypeError):
1924 1927 raise util.UnexpectedOutput(
1925 1928 _('Unexpected response from remote server:'), l)
1926 1929 self.ui.status(_('%d files to transfer, %s of data\n') %
1927 1930 (total_files, util.bytecount(total_bytes)))
1928 1931 start = time.time()
1929 1932 for i in xrange(total_files):
1930 1933 # XXX doesn't support '\n' or '\r' in filenames
1931 1934 l = fp.readline()
1932 1935 try:
1933 1936 name, size = l.split('\0', 1)
1934 1937 size = int(size)
1935 1938                 except (ValueError, TypeError):
1936 1939 raise util.UnexpectedOutput(
1937 1940 _('Unexpected response from remote server:'), l)
1938 1941 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1939 1942 ofp = self.sopener(name, 'w')
1940 1943 for chunk in util.filechunkiter(fp, limit=size):
1941 1944 ofp.write(chunk)
1942 1945 ofp.close()
1943 1946 elapsed = time.time() - start
1944 1947 if elapsed <= 0:
1945 1948 elapsed = 0.001
1946 1949 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1947 1950 (util.bytecount(total_bytes), elapsed,
1948 1951 util.bytecount(total_bytes / elapsed)))
1949 1952 self.invalidate()
1950 1953 return len(self.heads()) + 1
1951 1954
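# [editor's note] The stream_out wire format consumed above is line-
# oriented metadata with raw file contents interleaved:
#
#     <status>\n                    0 ok, 1 forbidden, 2 lock failed
#     <total_files> <total_bytes>\n
#     <name>\0<size>\n              repeated total_files times, each
#     <size raw bytes>              followed by that file's contents
#
# so each per-file header parses as in this sketch:
#
#     l = fp.readline()
#     name, size = l.split('\0', 1)
#     size = int(size)
#     data = fp.read(size)    # the real code streams this in chunks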
1952 1955 def clone(self, remote, heads=[], stream=False):
1953 1956 '''clone remote repository.
1954 1957
1955 1958 keyword arguments:
1956 1959 heads: list of revs to clone (forces use of pull)
1957 1960 stream: use streaming clone if possible'''
1958 1961
1959 1962 # now, all clients that can request uncompressed clones can
1960 1963 # read repo formats supported by all servers that can serve
1961 1964 # them.
1962 1965
1963 1966 # if revlog format changes, client will have to check version
1964 1967 # and format flags on "stream" capability, and use
1965 1968 # uncompressed only if compatible.
1966 1969
1967 1970 if stream and not heads and remote.capable('stream'):
1968 1971 return self.stream_in(remote)
1969 1972 return self.pull(remote, heads)
1970 1973
1971 1974 # used to avoid circular references so destructors work
1972 1975 def aftertrans(files):
1973 1976 renamefiles = [tuple(t) for t in files]
1974 1977 def a():
1975 1978 for src, dest in renamefiles:
1976 1979 util.rename(src, dest)
1977 1980 return a
1978 1981
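# [editor's note] aftertrans() copies the (src, dest) pairs into a
# fresh list and closes over that alone -- not over the repository or
# the transaction -- so the returned callback creates no reference
# cycle and destructors can run promptly.  Hypothetical use:
#
#     cleanup = aftertrans([('journal', 'undo')])
#     cleanup()    # renames journal -> undo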
1979 1982 def instance(ui, path, create):
1980 1983 return localrepository(ui, util.drop_scheme('file', path), create)
1981 1984
1982 1985 def islocal(path):
1983 1986 return True