simplify dirstate fixups in repo.status()
Matt Mackall
r4912:312c845e (default)
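The hunk below refactors the clean-file fixup in localrepo.status(): instead of taking the working-directory lock inline while scanning, the new code first collects files whose contents prove unchanged into a fixup list, then writes them back to the dirstate under an opportunistic, non-blocking wlock. A minimal sketch of the resulting pattern (paraphrased from the added lines of the hunk; lookup, ctx, wlock, modified, clean and list_clean are the surrounding method's locals):

    fixup = []
    for f in lookup:
        # the stat info was ambiguous, so do a full content compare
        if f not in ctx or ctx[f].cmp(self.wread(f)):
            modified.append(f)      # contents really differ
        else:
            fixup.append(f)         # stat was stale; file is clean
            if list_clean:
                clean.append(f)

    # update dirstate for files that are actually clean
    if fixup:
        cleanup = False
        if not wlock:
            try:
                # don't block: a missed fixup only costs a rescan later
                wlock = self.wlock(wait=0)
                cleanup = True      # we acquired it, so we release it
            except lock.LockException:
                pass
        if wlock:
            for f in fixup:
                self.dirstate.normal(f)
            if cleanup:
                wlock.release()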
@@ -1,1947 +1,1952 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util, extensions, hook
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.path = path
24 24 self.root = os.path.realpath(path)
25 25 self.path = os.path.join(self.root, ".hg")
26 26 self.origroot = path
27 27 self.opener = util.opener(self.path)
28 28 self.wopener = util.opener(self.root)
29 29
30 30 if not os.path.isdir(self.path):
31 31 if create:
32 32 if not os.path.exists(path):
33 33 os.mkdir(path)
34 34 os.mkdir(self.path)
35 35 requirements = ["revlogv1"]
36 36 if parentui.configbool('format', 'usestore', True):
37 37 os.mkdir(os.path.join(self.path, "store"))
38 38 requirements.append("store")
39 39 # create an invalid changelog
40 40 self.opener("00changelog.i", "a").write(
41 41 '\0\0\0\2' # represents revlogv2
42 42 ' dummy changelog to prevent using the old repo layout'
43 43 )
44 44 reqfile = self.opener("requires", "w")
45 45 for r in requirements:
46 46 reqfile.write("%s\n" % r)
47 47 reqfile.close()
48 48 else:
49 49 raise repo.RepoError(_("repository %s not found") % path)
50 50 elif create:
51 51 raise repo.RepoError(_("repository %s already exists") % path)
52 52 else:
53 53 # find requirements
54 54 try:
55 55 requirements = self.opener("requires").read().splitlines()
56 56 except IOError, inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 requirements = []
60 60 # check them
61 61 for r in requirements:
62 62 if r not in self.supported:
63 63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64 64
65 65 # setup store
66 66 if "store" in requirements:
67 67 self.encodefn = util.encodefilename
68 68 self.decodefn = util.decodefilename
69 69 self.spath = os.path.join(self.path, "store")
70 70 else:
71 71 self.encodefn = lambda x: x
72 72 self.decodefn = lambda x: x
73 73 self.spath = self.path
74 74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75 75
76 76 self.ui = ui.ui(parentui=parentui)
77 77 try:
78 78 self.ui.readconfig(self.join("hgrc"), self.root)
79 79 extensions.loadall(self.ui)
80 80 except IOError:
81 81 pass
82 82
83 83 self.tagscache = None
84 84 self.branchcache = None
85 85 self.nodetagscache = None
86 86 self.filterpats = {}
87 87 self.transhandle = None
88 88
89 89 def __getattr__(self, name):
90 90 if name == 'changelog':
91 91 self.changelog = changelog.changelog(self.sopener)
92 92 self.sopener.defversion = self.changelog.version
93 93 return self.changelog
94 94 if name == 'manifest':
95 95 self.changelog
96 96 self.manifest = manifest.manifest(self.sopener)
97 97 return self.manifest
98 98 if name == 'dirstate':
99 99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
100 100 return self.dirstate
101 101 else:
102 102 raise AttributeError, name
103 103
104 104 def url(self):
105 105 return 'file:' + self.root
106 106
107 107 def hook(self, name, throw=False, **args):
108 108 return hook.hook(self.ui, self, name, throw, **args)
109 109
110 110 tag_disallowed = ':\r\n'
111 111
112 112 def _tag(self, name, node, message, local, user, date, parent=None,
113 113 extra={}):
114 114 use_dirstate = parent is None
115 115
116 116 for c in self.tag_disallowed:
117 117 if c in name:
118 118 raise util.Abort(_('%r cannot be used in a tag name') % c)
119 119
120 120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
121 121
122 122 def writetag(fp, name, munge, prevtags):
123 123 if prevtags and prevtags[-1] != '\n':
124 124 fp.write('\n')
125 125 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
126 126 fp.close()
127 127 self.hook('tag', node=hex(node), tag=name, local=local)
128 128
129 129 prevtags = ''
130 130 if local:
131 131 try:
132 132 fp = self.opener('localtags', 'r+')
133 133 except IOError, err:
134 134 fp = self.opener('localtags', 'a')
135 135 else:
136 136 prevtags = fp.read()
137 137
138 138 # local tags are stored in the current charset
139 139 writetag(fp, name, None, prevtags)
140 140 return
141 141
142 142 if use_dirstate:
143 143 try:
144 144 fp = self.wfile('.hgtags', 'rb+')
145 145 except IOError, err:
146 146 fp = self.wfile('.hgtags', 'ab')
147 147 else:
148 148 prevtags = fp.read()
149 149 else:
150 150 try:
151 151 prevtags = self.filectx('.hgtags', parent).data()
152 152 except revlog.LookupError:
153 153 pass
154 154 fp = self.wfile('.hgtags', 'wb')
155 155
156 156 # committed tags are stored in UTF-8
157 157 writetag(fp, name, util.fromlocal, prevtags)
158 158
159 159 if use_dirstate and '.hgtags' not in self.dirstate:
160 160 self.add(['.hgtags'])
161 161
162 162 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
163 163 extra=extra)
164 164
165 165 self.hook('tag', node=hex(node), tag=name, local=local)
166 166
167 167 return tagnode
168 168
169 169 def tag(self, name, node, message, local, user, date):
170 170 '''tag a revision with a symbolic name.
171 171
172 172 if local is True, the tag is stored in a per-repository file.
173 173 otherwise, it is stored in the .hgtags file, and a new
174 174 changeset is committed with the change.
175 175
176 176 keyword arguments:
177 177
178 178 local: whether to store tag in non-version-controlled file
179 179 (default False)
180 180
181 181 message: commit message to use if committing
182 182
183 183 user: name of user to use if committing
184 184
185 185 date: date tuple to use if committing'''
186 186
187 187 for x in self.status()[:5]:
188 188 if '.hgtags' in x:
189 189 raise util.Abort(_('working copy of .hgtags is changed '
190 190 '(please commit .hgtags manually)'))
191 191
192 192
193 193 self._tag(name, node, message, local, user, date)
194 194
195 195 def tags(self):
196 196 '''return a mapping of tag to node'''
197 197 if self.tagscache:
198 198 return self.tagscache
199 199
200 200 globaltags = {}
201 201
202 202 def readtags(lines, fn):
203 203 filetags = {}
204 204 count = 0
205 205
206 206 def warn(msg):
207 207 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
208 208
209 209 for l in lines:
210 210 count += 1
211 211 if not l:
212 212 continue
213 213 s = l.split(" ", 1)
214 214 if len(s) != 2:
215 215 warn(_("cannot parse entry"))
216 216 continue
217 217 node, key = s
218 218 key = util.tolocal(key.strip()) # stored in UTF-8
219 219 try:
220 220 bin_n = bin(node)
221 221 except TypeError:
222 222 warn(_("node '%s' is not well formed") % node)
223 223 continue
224 224 if bin_n not in self.changelog.nodemap:
225 225 warn(_("tag '%s' refers to unknown node") % key)
226 226 continue
227 227
228 228 h = []
229 229 if key in filetags:
230 230 n, h = filetags[key]
231 231 h.append(n)
232 232 filetags[key] = (bin_n, h)
233 233
234 234 for k, nh in filetags.items():
235 235 if k not in globaltags:
236 236 globaltags[k] = nh
237 237 continue
238 238 # we prefer the global tag if:
239 239 # it supersedes us OR
240 240 # mutual supersedes and it has a higher rank
241 241 # otherwise we win because we're tip-most
242 242 an, ah = nh
243 243 bn, bh = globaltags[k]
244 244 if (bn != an and an in bh and
245 245 (bn not in ah or len(bh) > len(ah))):
246 246 an = bn
247 247 ah.extend([n for n in bh if n not in ah])
248 248 globaltags[k] = an, ah
249 249
250 250 # read the tags file from each head, ending with the tip
251 251 f = None
252 252 for rev, node, fnode in self._hgtagsnodes():
253 253 f = (f and f.filectx(fnode) or
254 254 self.filectx('.hgtags', fileid=fnode))
255 255 readtags(f.data().splitlines(), f)
256 256
257 257 try:
258 258 data = util.fromlocal(self.opener("localtags").read())
259 259 # localtags are stored in the local character set
260 260 # while the internal tag table is stored in UTF-8
261 261 readtags(data.splitlines(), "localtags")
262 262 except IOError:
263 263 pass
264 264
265 265 self.tagscache = {}
266 266 for k,nh in globaltags.items():
267 267 n = nh[0]
268 268 if n != nullid:
269 269 self.tagscache[k] = n
270 270 self.tagscache['tip'] = self.changelog.tip()
271 271
272 272 return self.tagscache
273 273
274 274 def _hgtagsnodes(self):
275 275 heads = self.heads()
276 276 heads.reverse()
277 277 last = {}
278 278 ret = []
279 279 for node in heads:
280 280 c = self.changectx(node)
281 281 rev = c.rev()
282 282 try:
283 283 fnode = c.filenode('.hgtags')
284 284 except revlog.LookupError:
285 285 continue
286 286 ret.append((rev, node, fnode))
287 287 if fnode in last:
288 288 ret[last[fnode]] = None
289 289 last[fnode] = len(ret) - 1
290 290 return [item for item in ret if item]
291 291
292 292 def tagslist(self):
293 293 '''return a list of tags ordered by revision'''
294 294 l = []
295 295 for t, n in self.tags().items():
296 296 try:
297 297 r = self.changelog.rev(n)
298 298 except:
299 299 r = -2 # sort to the beginning of the list if unknown
300 300 l.append((r, t, n))
301 301 l.sort()
302 302 return [(t, n) for r, t, n in l]
303 303
304 304 def nodetags(self, node):
305 305 '''return the tags associated with a node'''
306 306 if not self.nodetagscache:
307 307 self.nodetagscache = {}
308 308 for t, n in self.tags().items():
309 309 self.nodetagscache.setdefault(n, []).append(t)
310 310 return self.nodetagscache.get(node, [])
311 311
312 312 def _branchtags(self):
313 313 partial, last, lrev = self._readbranchcache()
314 314
315 315 tiprev = self.changelog.count() - 1
316 316 if lrev != tiprev:
317 317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319 319
320 320 return partial
321 321
322 322 def branchtags(self):
323 323 if self.branchcache is not None:
324 324 return self.branchcache
325 325
326 326 self.branchcache = {} # avoid recursion in changectx
327 327 partial = self._branchtags()
328 328
329 329 # the branch cache is stored on disk as UTF-8, but in the local
330 330 # charset internally
331 331 for k, v in partial.items():
332 332 self.branchcache[util.tolocal(k)] = v
333 333 return self.branchcache
334 334
335 335 def _readbranchcache(self):
336 336 partial = {}
337 337 try:
338 338 f = self.opener("branch.cache")
339 339 lines = f.read().split('\n')
340 340 f.close()
341 341 except (IOError, OSError):
342 342 return {}, nullid, nullrev
343 343
344 344 try:
345 345 last, lrev = lines.pop(0).split(" ", 1)
346 346 last, lrev = bin(last), int(lrev)
347 347 if not (lrev < self.changelog.count() and
348 348 self.changelog.node(lrev) == last): # sanity check
349 349 # invalidate the cache
350 350 raise ValueError('Invalid branch cache: unknown tip')
351 351 for l in lines:
352 352 if not l: continue
353 353 node, label = l.split(" ", 1)
354 354 partial[label.strip()] = bin(node)
355 355 except (KeyboardInterrupt, util.SignalInterrupt):
356 356 raise
357 357 except Exception, inst:
358 358 if self.ui.debugflag:
359 359 self.ui.warn(str(inst), '\n')
360 360 partial, last, lrev = {}, nullid, nullrev
361 361 return partial, last, lrev
362 362
363 363 def _writebranchcache(self, branches, tip, tiprev):
364 364 try:
365 365 f = self.opener("branch.cache", "w", atomictemp=True)
366 366 f.write("%s %s\n" % (hex(tip), tiprev))
367 367 for label, node in branches.iteritems():
368 368 f.write("%s %s\n" % (hex(node), label))
369 369 f.rename()
370 370 except (IOError, OSError):
371 371 pass
372 372
373 373 def _updatebranchcache(self, partial, start, end):
374 374 for r in xrange(start, end):
375 375 c = self.changectx(r)
376 376 b = c.branch()
377 377 partial[b] = c.node()
378 378
379 379 def lookup(self, key):
380 380 if key == '.':
381 381 key, second = self.dirstate.parents()
382 382 if key == nullid:
383 383 raise repo.RepoError(_("no revision checked out"))
384 384 if second != nullid:
385 385 self.ui.warn(_("warning: working directory has two parents, "
386 386 "tag '.' uses the first\n"))
387 387 elif key == 'null':
388 388 return nullid
389 389 n = self.changelog._match(key)
390 390 if n:
391 391 return n
392 392 if key in self.tags():
393 393 return self.tags()[key]
394 394 if key in self.branchtags():
395 395 return self.branchtags()[key]
396 396 n = self.changelog._partialmatch(key)
397 397 if n:
398 398 return n
399 399 raise repo.RepoError(_("unknown revision '%s'") % key)
400 400
401 401 def dev(self):
402 402 return os.lstat(self.path).st_dev
403 403
404 404 def local(self):
405 405 return True
406 406
407 407 def join(self, f):
408 408 return os.path.join(self.path, f)
409 409
410 410 def sjoin(self, f):
411 411 f = self.encodefn(f)
412 412 return os.path.join(self.spath, f)
413 413
414 414 def wjoin(self, f):
415 415 return os.path.join(self.root, f)
416 416
417 417 def file(self, f):
418 418 if f[0] == '/':
419 419 f = f[1:]
420 420 return filelog.filelog(self.sopener, f)
421 421
422 422 def changectx(self, changeid=None):
423 423 return context.changectx(self, changeid)
424 424
425 425 def workingctx(self):
426 426 return context.workingctx(self)
427 427
428 428 def parents(self, changeid=None):
429 429 '''
430 430 get list of changectxs for parents of changeid or working directory
431 431 '''
432 432 if changeid is None:
433 433 pl = self.dirstate.parents()
434 434 else:
435 435 n = self.changelog.lookup(changeid)
436 436 pl = self.changelog.parents(n)
437 437 if pl[1] == nullid:
438 438 return [self.changectx(pl[0])]
439 439 return [self.changectx(pl[0]), self.changectx(pl[1])]
440 440
441 441 def filectx(self, path, changeid=None, fileid=None):
442 442 """changeid can be a changeset revision, node, or tag.
443 443 fileid can be a file revision or node."""
444 444 return context.filectx(self, path, changeid, fileid)
445 445
446 446 def getcwd(self):
447 447 return self.dirstate.getcwd()
448 448
449 449 def pathto(self, f, cwd=None):
450 450 return self.dirstate.pathto(f, cwd)
451 451
452 452 def wfile(self, f, mode='r'):
453 453 return self.wopener(f, mode)
454 454
455 455 def _link(self, f):
456 456 return os.path.islink(self.wjoin(f))
457 457
458 458 def _filter(self, filter, filename, data):
459 459 if filter not in self.filterpats:
460 460 l = []
461 461 for pat, cmd in self.ui.configitems(filter):
462 462 mf = util.matcher(self.root, "", [pat], [], [])[1]
463 463 l.append((mf, cmd))
464 464 self.filterpats[filter] = l
465 465
466 466 for mf, cmd in self.filterpats[filter]:
467 467 if mf(filename):
468 468 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
469 469 data = util.filter(data, cmd)
470 470 break
471 471
472 472 return data
473 473
474 474 def wread(self, filename):
475 475 if self._link(filename):
476 476 data = os.readlink(self.wjoin(filename))
477 477 else:
478 478 data = self.wopener(filename, 'r').read()
479 479 return self._filter("encode", filename, data)
480 480
481 481 def wwrite(self, filename, data, flags):
482 482 data = self._filter("decode", filename, data)
483 483 if "l" in flags:
484 484 self.wopener.symlink(data, filename)
485 485 else:
486 486 try:
487 487 if self._link(filename):
488 488 os.unlink(self.wjoin(filename))
489 489 except OSError:
490 490 pass
491 491 self.wopener(filename, 'w').write(data)
492 492 util.set_exec(self.wjoin(filename), "x" in flags)
493 493
494 494 def wwritedata(self, filename, data):
495 495 return self._filter("decode", filename, data)
496 496
497 497 def transaction(self):
498 498 tr = self.transhandle
499 499 if tr != None and tr.running():
500 500 return tr.nest()
501 501
502 502 # save dirstate for rollback
503 503 try:
504 504 ds = self.opener("dirstate").read()
505 505 except IOError:
506 506 ds = ""
507 507 self.opener("journal.dirstate", "w").write(ds)
508 508
509 509 renames = [(self.sjoin("journal"), self.sjoin("undo")),
510 510 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
511 511 tr = transaction.transaction(self.ui.warn, self.sopener,
512 512 self.sjoin("journal"),
513 513 aftertrans(renames))
514 514 self.transhandle = tr
515 515 return tr
516 516
517 517 def recover(self):
518 518 l = self.lock()
519 519 if os.path.exists(self.sjoin("journal")):
520 520 self.ui.status(_("rolling back interrupted transaction\n"))
521 521 transaction.rollback(self.sopener, self.sjoin("journal"))
522 522 self.invalidate()
523 523 return True
524 524 else:
525 525 self.ui.warn(_("no interrupted transaction available\n"))
526 526 return False
527 527
528 528 def rollback(self, wlock=None, lock=None):
529 529 if not wlock:
530 530 wlock = self.wlock()
531 531 if not lock:
532 532 lock = self.lock()
533 533 if os.path.exists(self.sjoin("undo")):
534 534 self.ui.status(_("rolling back last transaction\n"))
535 535 transaction.rollback(self.sopener, self.sjoin("undo"))
536 536 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
537 537 self.invalidate()
538 538 self.dirstate.invalidate()
539 539 else:
540 540 self.ui.warn(_("no rollback information available\n"))
541 541
542 542 def invalidate(self):
543 543 for a in "changelog manifest".split():
544 544 if hasattr(self, a):
545 545 self.__delattr__(a)
546 546 self.tagscache = None
547 547 self.nodetagscache = None
548 548
549 549 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
550 550 desc=None):
551 551 try:
552 552 l = lock.lock(lockname, 0, releasefn, desc=desc)
553 553 except lock.LockHeld, inst:
554 554 if not wait:
555 555 raise
556 556 self.ui.warn(_("waiting for lock on %s held by %r\n") %
557 557 (desc, inst.locker))
558 558 # default to 600 seconds timeout
559 559 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
560 560 releasefn, desc=desc)
561 561 if acquirefn:
562 562 acquirefn()
563 563 return l
564 564
565 565 def lock(self, wait=1):
566 566 return self.do_lock(self.sjoin("lock"), wait,
567 567 acquirefn=self.invalidate,
568 568 desc=_('repository %s') % self.origroot)
569 569
570 570 def wlock(self, wait=1):
571 571 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
572 572 self.dirstate.invalidate,
573 573 desc=_('working directory of %s') % self.origroot)
574 574
575 575 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
576 576 """
577 577 commit an individual file as part of a larger transaction
578 578 """
579 579
580 580 t = self.wread(fn)
581 581 fl = self.file(fn)
582 582 fp1 = manifest1.get(fn, nullid)
583 583 fp2 = manifest2.get(fn, nullid)
584 584
585 585 meta = {}
586 586 cp = self.dirstate.copied(fn)
587 587 if cp:
588 588 # Mark the new revision of this file as a copy of another
589 589 # file. This copy data will effectively act as a parent
590 590 # of this new revision. If this is a merge, the first
591 591 # parent will be the nullid (meaning "look up the copy data")
592 592 # and the second one will be the other parent. For example:
593 593 #
594 594 # 0 --- 1 --- 3 rev1 changes file foo
595 595 # \ / rev2 renames foo to bar and changes it
596 596 # \- 2 -/ rev3 should have bar with all changes and
597 597 # should record that bar descends from
598 598 # bar in rev2 and foo in rev1
599 599 #
600 600 # this allows this merge to succeed:
601 601 #
602 602 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
603 603 # \ / merging rev3 and rev4 should use bar@rev2
604 604 # \- 2 --- 4 as the merge base
605 605 #
606 606 meta["copy"] = cp
607 607 if not manifest2: # not a branch merge
608 608 meta["copyrev"] = hex(manifest1.get(cp, nullid))
609 609 fp2 = nullid
610 610 elif fp2 != nullid: # copied on remote side
611 611 meta["copyrev"] = hex(manifest1.get(cp, nullid))
612 612 elif fp1 != nullid: # copied on local side, reversed
613 613 meta["copyrev"] = hex(manifest2.get(cp))
614 614 fp2 = fp1
615 615 else: # directory rename
616 616 meta["copyrev"] = hex(manifest1.get(cp, nullid))
617 617 self.ui.debug(_(" %s: copy %s:%s\n") %
618 618 (fn, cp, meta["copyrev"]))
619 619 fp1 = nullid
620 620 elif fp2 != nullid:
621 621 # is one parent an ancestor of the other?
622 622 fpa = fl.ancestor(fp1, fp2)
623 623 if fpa == fp1:
624 624 fp1, fp2 = fp2, nullid
625 625 elif fpa == fp2:
626 626 fp2 = nullid
627 627
628 628 # is the file unmodified from the parent? report existing entry
629 629 if fp2 == nullid and not fl.cmp(fp1, t):
630 630 return fp1
631 631
632 632 changelist.append(fn)
633 633 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
634 634
635 635 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
636 636 if p1 is None:
637 637 p1, p2 = self.dirstate.parents()
638 638 return self.commit(files=files, text=text, user=user, date=date,
639 639 p1=p1, p2=p2, wlock=wlock, extra=extra)
640 640
641 641 def commit(self, files=None, text="", user=None, date=None,
642 642 match=util.always, force=False, lock=None, wlock=None,
643 643 force_editor=False, p1=None, p2=None, extra={}):
644 644
645 645 commit = []
646 646 remove = []
647 647 changed = []
648 648 use_dirstate = (p1 is None) # not rawcommit
649 649 extra = extra.copy()
650 650
651 651 if use_dirstate:
652 652 if files:
653 653 for f in files:
654 654 s = self.dirstate[f]
655 655 if s in 'nma':
656 656 commit.append(f)
657 657 elif s == 'r':
658 658 remove.append(f)
659 659 else:
660 660 self.ui.warn(_("%s not tracked!\n") % f)
661 661 else:
662 662 changes = self.status(match=match)[:5]
663 663 modified, added, removed, deleted, unknown = changes
664 664 commit = modified + added
665 665 remove = removed
666 666 else:
667 667 commit = files
668 668
669 669 if use_dirstate:
670 670 p1, p2 = self.dirstate.parents()
671 671 update_dirstate = True
672 672 else:
673 673 p1, p2 = p1, p2 or nullid
674 674 update_dirstate = (self.dirstate.parents()[0] == p1)
675 675
676 676 c1 = self.changelog.read(p1)
677 677 c2 = self.changelog.read(p2)
678 678 m1 = self.manifest.read(c1[0]).copy()
679 679 m2 = self.manifest.read(c2[0])
680 680
681 681 if use_dirstate:
682 682 branchname = self.workingctx().branch()
683 683 try:
684 684 branchname = branchname.decode('UTF-8').encode('UTF-8')
685 685 except UnicodeDecodeError:
686 686 raise util.Abort(_('branch name not in UTF-8!'))
687 687 else:
688 688 branchname = ""
689 689
690 690 if use_dirstate:
691 691 oldname = c1[5].get("branch") # stored in UTF-8
692 692 if (not commit and not remove and not force and p2 == nullid
693 693 and branchname == oldname):
694 694 self.ui.status(_("nothing changed\n"))
695 695 return None
696 696
697 697 xp1 = hex(p1)
698 698 if p2 == nullid: xp2 = ''
699 699 else: xp2 = hex(p2)
700 700
701 701 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
702 702
703 703 if not wlock:
704 704 wlock = self.wlock()
705 705 if not lock:
706 706 lock = self.lock()
707 707 tr = self.transaction()
708 708
709 709 # check in files
710 710 new = {}
711 711 linkrev = self.changelog.count()
712 712 commit.sort()
713 713 is_exec = util.execfunc(self.root, m1.execf)
714 714 is_link = util.linkfunc(self.root, m1.linkf)
715 715 for f in commit:
716 716 self.ui.note(f + "\n")
717 717 try:
718 718 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
719 719 new_exec = is_exec(f)
720 720 new_link = is_link(f)
721 721 if not changed or changed[-1] != f:
722 722 # mention the file in the changelog if some flag changed,
723 723 # even if there was no content change.
724 724 old_exec = m1.execf(f)
725 725 old_link = m1.linkf(f)
726 726 if old_exec != new_exec or old_link != new_link:
727 727 changed.append(f)
728 728 m1.set(f, new_exec, new_link)
729 729 except (OSError, IOError):
730 730 if use_dirstate:
731 731 self.ui.warn(_("trouble committing %s!\n") % f)
732 732 raise
733 733 else:
734 734 remove.append(f)
735 735
736 736 # update manifest
737 737 m1.update(new)
738 738 remove.sort()
739 739 removed = []
740 740
741 741 for f in remove:
742 742 if f in m1:
743 743 del m1[f]
744 744 removed.append(f)
745 745 elif f in m2:
746 746 removed.append(f)
747 747 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
748 748
749 749 # add changeset
750 750 new = new.keys()
751 751 new.sort()
752 752
753 753 user = user or self.ui.username()
754 754 if not text or force_editor:
755 755 edittext = []
756 756 if text:
757 757 edittext.append(text)
758 758 edittext.append("")
759 759 edittext.append("HG: user: %s" % user)
760 760 if p2 != nullid:
761 761 edittext.append("HG: branch merge")
762 762 if branchname:
763 763 edittext.append("HG: branch %s" % util.tolocal(branchname))
764 764 edittext.extend(["HG: changed %s" % f for f in changed])
765 765 edittext.extend(["HG: removed %s" % f for f in removed])
766 766 if not changed and not remove:
767 767 edittext.append("HG: no files changed")
768 768 edittext.append("")
769 769 # run editor in the repository root
770 770 olddir = os.getcwd()
771 771 os.chdir(self.root)
772 772 text = self.ui.edit("\n".join(edittext), user)
773 773 os.chdir(olddir)
774 774
775 775 lines = [line.rstrip() for line in text.rstrip().splitlines()]
776 776 while lines and not lines[0]:
777 777 del lines[0]
778 778 if not lines:
779 779 return None
780 780 text = '\n'.join(lines)
781 781 if branchname:
782 782 extra["branch"] = branchname
783 783 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
784 784 user, date, extra)
785 785 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
786 786 parent2=xp2)
787 787 tr.close()
788 788
789 789 if self.branchcache and "branch" in extra:
790 790 self.branchcache[util.tolocal(extra["branch"])] = n
791 791
792 792 if use_dirstate or update_dirstate:
793 793 self.dirstate.setparents(n)
794 794 if use_dirstate:
795 795 for f in new:
796 796 self.dirstate.normal(f)
797 797 for f in removed:
798 798 self.dirstate.forget(f)
799 799
800 800 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
801 801 return n
802 802
803 803 def walk(self, node=None, files=[], match=util.always, badmatch=None):
804 804 '''
805 805 walk recursively through the directory tree or a given
806 806 changeset, finding all files matched by the match
807 807 function
808 808
809 809 results are yielded in a tuple (src, filename), where src
810 810 is one of:
811 811 'f' the file was found in the directory tree
812 812 'm' the file was only in the dirstate and not in the tree
813 813 'b' file was not found and matched badmatch
814 814 '''
815 815
816 816 if node:
817 817 fdict = dict.fromkeys(files)
818 818 # for dirstate.walk, files=['.'] means "walk the whole tree".
819 819 # follow that here, too
820 820 fdict.pop('.', None)
821 821 mdict = self.manifest.read(self.changelog.read(node)[0])
822 822 mfiles = mdict.keys()
823 823 mfiles.sort()
824 824 for fn in mfiles:
825 825 for ffn in fdict:
826 826 # match if the file is the exact name or a directory
827 827 if ffn == fn or fn.startswith("%s/" % ffn):
828 828 del fdict[ffn]
829 829 break
830 830 if match(fn):
831 831 yield 'm', fn
832 832 ffiles = fdict.keys()
833 833 ffiles.sort()
834 834 for fn in ffiles:
835 835 if badmatch and badmatch(fn):
836 836 if match(fn):
837 837 yield 'b', fn
838 838 else:
839 839 self.ui.warn(_('%s: No such file in rev %s\n')
840 840 % (self.pathto(fn), short(node)))
841 841 else:
842 842 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
843 843 yield src, fn
844 844
845 845 def status(self, node1=None, node2=None, files=[], match=util.always,
846 846 wlock=None, list_ignored=False, list_clean=False):
847 847 """return status of files between two nodes or node and working directory
848 848
849 849 If node1 is None, use the first dirstate parent instead.
850 850 If node2 is None, compare node1 with working directory.
851 851 """
852 852
853 853 def fcmp(fn, getnode):
854 854 t1 = self.wread(fn)
855 855 return self.file(fn).cmp(getnode(fn), t1)
856 856
857 857 def mfmatches(node):
858 858 change = self.changelog.read(node)
859 859 mf = self.manifest.read(change[0]).copy()
860 860 for fn in mf.keys():
861 861 if not match(fn):
862 862 del mf[fn]
863 863 return mf
864 864
865 865 modified, added, removed, deleted, unknown = [], [], [], [], []
866 866 ignored, clean = [], []
867 867
868 868 compareworking = False
869 869 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
870 870 compareworking = True
871 871
872 872 if not compareworking:
873 873 # read the manifest from node1 before the manifest from node2,
874 874 # so that we'll hit the manifest cache if we're going through
875 875 # all the revisions in parent->child order.
876 876 mf1 = mfmatches(node1)
877 877
878 mywlock = False
879
880 878 # are we comparing the working directory?
881 879 if not node2:
882 880 (lookup, modified, added, removed, deleted, unknown,
883 881 ignored, clean) = self.dirstate.status(files, match,
884 882 list_ignored, list_clean)
885 883
886 884 # are we comparing working dir against its parent?
887 885 if compareworking:
888 886 if lookup:
887 fixup = []
889 888 # do a full compare of any files that might have changed
890 889 ctx = self.changectx()
891 890 for f in lookup:
892 891 if f not in ctx or ctx[f].cmp(self.wread(f)):
893 892 modified.append(f)
894 893 else:
894 fixup.append(f)
895 895 if list_clean:
896 896 clean.append(f)
897 if not wlock and not mywlock:
898 mywlock = True
899 try:
900 wlock = self.wlock(wait=0)
901 except lock.LockException:
902 pass
903 if wlock:
897
898 # update dirstate for files that are actually clean
899 if fixup:
900 cleanup = False
901 if not wlock:
902 try:
903 wlock = self.wlock(wait=0)
904 cleanup = True
905 except lock.LockException:
906 pass
907 if wlock:
908 for f in fixup:
904 909 self.dirstate.normal(f)
910 if cleanup:
911 wlock.release()
905 912 else:
906 913 # we are comparing working dir against non-parent
907 914 # generate a pseudo-manifest for the working dir
908 915 # XXX: create it in dirstate.py ?
909 916 mf2 = mfmatches(self.dirstate.parents()[0])
910 917 is_exec = util.execfunc(self.root, mf2.execf)
911 918 is_link = util.linkfunc(self.root, mf2.linkf)
912 919 for f in lookup + modified + added:
913 920 mf2[f] = ""
914 921 mf2.set(f, is_exec(f), is_link(f))
915 922 for f in removed:
916 923 if f in mf2:
917 924 del mf2[f]
918 925
919 if mywlock and wlock:
920 wlock.release()
921 926 else:
922 927 # we are comparing two revisions
923 928 mf2 = mfmatches(node2)
924 929
925 930 if not compareworking:
926 931 # flush lists from dirstate before comparing manifests
927 932 modified, added, clean = [], [], []
928 933
929 934 # make sure to sort the files so we talk to the disk in a
930 935 # reasonable order
931 936 mf2keys = mf2.keys()
932 937 mf2keys.sort()
933 938 getnode = lambda fn: mf1.get(fn, nullid)
934 939 for fn in mf2keys:
935 940 if mf1.has_key(fn):
936 941 if (mf1.flags(fn) != mf2.flags(fn) or
937 942 (mf1[fn] != mf2[fn] and
938 943 (mf2[fn] != "" or fcmp(fn, getnode)))):
939 944 modified.append(fn)
940 945 elif list_clean:
941 946 clean.append(fn)
942 947 del mf1[fn]
943 948 else:
944 949 added.append(fn)
945 950
946 951 removed = mf1.keys()
947 952
948 953 # sort and return results:
949 954 for l in modified, added, removed, deleted, unknown, ignored, clean:
950 955 l.sort()
951 956 return (modified, added, removed, deleted, unknown, ignored, clean)
952 957
953 958 def add(self, list, wlock=None):
954 959 if not wlock:
955 960 wlock = self.wlock()
956 961 for f in list:
957 962 p = self.wjoin(f)
958 963 try:
959 964 st = os.lstat(p)
960 965 except:
961 966 self.ui.warn(_("%s does not exist!\n") % f)
962 967 continue
963 968 if st.st_size > 10000000:
964 969 self.ui.warn(_("%s: files over 10MB may cause memory and"
965 970 " performance problems\n"
966 971 "(use 'hg revert %s' to unadd the file)\n")
967 972 % (f, f))
968 973 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
969 974 self.ui.warn(_("%s not added: only files and symlinks "
970 975 "supported currently\n") % f)
971 976 elif self.dirstate[f] in 'an':
972 977 self.ui.warn(_("%s already tracked!\n") % f)
973 978 else:
974 979 self.dirstate.add(f)
975 980
976 981 def forget(self, list, wlock=None):
977 982 if not wlock:
978 983 wlock = self.wlock()
979 984 for f in list:
980 985 if self.dirstate[f] != 'a':
981 986 self.ui.warn(_("%s not added!\n") % f)
982 987 else:
983 988 self.dirstate.forget(f)
984 989
985 990 def remove(self, list, unlink=False, wlock=None):
986 991 if unlink:
987 992 for f in list:
988 993 try:
989 994 util.unlink(self.wjoin(f))
990 995 except OSError, inst:
991 996 if inst.errno != errno.ENOENT:
992 997 raise
993 998 if not wlock:
994 999 wlock = self.wlock()
995 1000 for f in list:
996 1001 if unlink and os.path.exists(self.wjoin(f)):
997 1002 self.ui.warn(_("%s still exists!\n") % f)
998 1003 elif self.dirstate[f] == 'a':
999 1004 self.dirstate.forget(f)
1000 1005 elif f not in self.dirstate:
1001 1006 self.ui.warn(_("%s not tracked!\n") % f)
1002 1007 else:
1003 1008 self.dirstate.remove(f)
1004 1009
1005 1010 def undelete(self, list, wlock=None):
1006 1011 p = self.dirstate.parents()[0]
1007 1012 mn = self.changelog.read(p)[0]
1008 1013 m = self.manifest.read(mn)
1009 1014 if not wlock:
1010 1015 wlock = self.wlock()
1011 1016 for f in list:
1012 1017 if self.dirstate[f] != 'r':
1013 1018 self.ui.warn("%s not removed!\n" % f)
1014 1019 else:
1015 1020 t = self.file(f).read(m[f])
1016 1021 self.wwrite(f, t, m.flags(f))
1017 1022 self.dirstate.normal(f)
1018 1023
1019 1024 def copy(self, source, dest, wlock=None):
1020 1025 p = self.wjoin(dest)
1021 1026 if not (os.path.exists(p) or os.path.islink(p)):
1022 1027 self.ui.warn(_("%s does not exist!\n") % dest)
1023 1028 elif not (os.path.isfile(p) or os.path.islink(p)):
1024 1029 self.ui.warn(_("copy failed: %s is not a file or a "
1025 1030 "symbolic link\n") % dest)
1026 1031 else:
1027 1032 if not wlock:
1028 1033 wlock = self.wlock()
1029 1034 if dest not in self.dirstate:
1030 1035 self.dirstate.add(dest)
1031 1036 self.dirstate.copy(source, dest)
1032 1037
1033 1038 def heads(self, start=None):
1034 1039 heads = self.changelog.heads(start)
1035 1040 # sort the output in rev descending order
1036 1041 heads = [(-self.changelog.rev(h), h) for h in heads]
1037 1042 heads.sort()
1038 1043 return [n for (r, n) in heads]
1039 1044
1040 1045 def branchheads(self, branch, start=None):
1041 1046 branches = self.branchtags()
1042 1047 if branch not in branches:
1043 1048 return []
1044 1049 # The basic algorithm is this:
1045 1050 #
1046 1051 # Start from the branch tip since there are no later revisions that can
1047 1052 # possibly be in this branch, and the tip is a guaranteed head.
1048 1053 #
1049 1054 # Remember the tip's parents as the first ancestors, since these by
1050 1055 # definition are not heads.
1051 1056 #
1052 1057 # Step backwards from the branch tip through all the revisions. We are
1053 1058 # guaranteed by the rules of Mercurial that we will now be visiting the
1054 1059 # nodes in reverse topological order (children before parents).
1055 1060 #
1056 1061 # If a revision is one of the ancestors of a head then we can toss it
1057 1062 # out of the ancestors set (we've already found it and won't be
1058 1063 # visiting it again) and put its parents in the ancestors set.
1059 1064 #
1060 1065 # Otherwise, if a revision is in the branch it's another head, since it
1061 1066 # wasn't in the ancestor list of an existing head. So add it to the
1062 1067 # head list, and add its parents to the ancestor list.
1063 1068 #
1064 1069 # If it is not in the branch ignore it.
1065 1070 #
1066 1071 # Once we have a list of heads, use nodesbetween to filter out all the
1067 1072 # heads that cannot be reached from startrev. There may be a more
1068 1073 # efficient way to do this as part of the previous algorithm.
1069 1074
1070 1075 set = util.set
1071 1076 heads = [self.changelog.rev(branches[branch])]
1072 1077 # Don't care if ancestors contains nullrev or not.
1073 1078 ancestors = set(self.changelog.parentrevs(heads[0]))
1074 1079 for rev in xrange(heads[0] - 1, nullrev, -1):
1075 1080 if rev in ancestors:
1076 1081 ancestors.update(self.changelog.parentrevs(rev))
1077 1082 ancestors.remove(rev)
1078 1083 elif self.changectx(rev).branch() == branch:
1079 1084 heads.append(rev)
1080 1085 ancestors.update(self.changelog.parentrevs(rev))
1081 1086 heads = [self.changelog.node(rev) for rev in heads]
1082 1087 if start is not None:
1083 1088 heads = self.changelog.nodesbetween([start], heads)[2]
1084 1089 return heads
1085 1090
1086 1091 def branches(self, nodes):
1087 1092 if not nodes:
1088 1093 nodes = [self.changelog.tip()]
1089 1094 b = []
1090 1095 for n in nodes:
1091 1096 t = n
1092 1097 while 1:
1093 1098 p = self.changelog.parents(n)
1094 1099 if p[1] != nullid or p[0] == nullid:
1095 1100 b.append((t, n, p[0], p[1]))
1096 1101 break
1097 1102 n = p[0]
1098 1103 return b
1099 1104
1100 1105 def between(self, pairs):
1101 1106 r = []
1102 1107
1103 1108 for top, bottom in pairs:
1104 1109 n, l, i = top, [], 0
1105 1110 f = 1
1106 1111
1107 1112 while n != bottom:
1108 1113 p = self.changelog.parents(n)[0]
1109 1114 if i == f:
1110 1115 l.append(n)
1111 1116 f = f * 2
1112 1117 n = p
1113 1118 i += 1
1114 1119
1115 1120 r.append(l)
1116 1121
1117 1122 return r
1118 1123
1119 1124 def findincoming(self, remote, base=None, heads=None, force=False):
1120 1125 """Return list of roots of the subsets of missing nodes from remote
1121 1126
1122 1127 If base dict is specified, assume that these nodes and their parents
1123 1128 exist on the remote side and that no child of a node of base exists
1124 1129 in both remote and self.
1125 1130 Furthermore base will be updated to include the nodes that exist
1126 1131 in self and remote but have no children in both self and remote.
1127 1132 If a list of heads is specified, return only nodes which are heads
1128 1133 or ancestors of these heads.
1129 1134
1130 1135 All the ancestors of base are in self and in remote.
1131 1136 All the descendants of the list returned are missing in self.
1132 1137 (and so we know that the rest of the nodes are missing in remote, see
1133 1138 outgoing)
1134 1139 """
1135 1140 m = self.changelog.nodemap
1136 1141 search = []
1137 1142 fetch = {}
1138 1143 seen = {}
1139 1144 seenbranch = {}
1140 1145 if base == None:
1141 1146 base = {}
1142 1147
1143 1148 if not heads:
1144 1149 heads = remote.heads()
1145 1150
1146 1151 if self.changelog.tip() == nullid:
1147 1152 base[nullid] = 1
1148 1153 if heads != [nullid]:
1149 1154 return [nullid]
1150 1155 return []
1151 1156
1152 1157 # assume we're closer to the tip than the root
1153 1158 # and start by examining the heads
1154 1159 self.ui.status(_("searching for changes\n"))
1155 1160
1156 1161 unknown = []
1157 1162 for h in heads:
1158 1163 if h not in m:
1159 1164 unknown.append(h)
1160 1165 else:
1161 1166 base[h] = 1
1162 1167
1163 1168 if not unknown:
1164 1169 return []
1165 1170
1166 1171 req = dict.fromkeys(unknown)
1167 1172 reqcnt = 0
1168 1173
1169 1174 # search through remote branches
1170 1175 # a 'branch' here is a linear segment of history, with four parts:
1171 1176 # head, root, first parent, second parent
1172 1177 # (a branch always has two parents (or none) by definition)
1173 1178 unknown = remote.branches(unknown)
1174 1179 while unknown:
1175 1180 r = []
1176 1181 while unknown:
1177 1182 n = unknown.pop(0)
1178 1183 if n[0] in seen:
1179 1184 continue
1180 1185
1181 1186 self.ui.debug(_("examining %s:%s\n")
1182 1187 % (short(n[0]), short(n[1])))
1183 1188 if n[0] == nullid: # found the end of the branch
1184 1189 pass
1185 1190 elif n in seenbranch:
1186 1191 self.ui.debug(_("branch already found\n"))
1187 1192 continue
1188 1193 elif n[1] and n[1] in m: # do we know the base?
1189 1194 self.ui.debug(_("found incomplete branch %s:%s\n")
1190 1195 % (short(n[0]), short(n[1])))
1191 1196 search.append(n) # schedule branch range for scanning
1192 1197 seenbranch[n] = 1
1193 1198 else:
1194 1199 if n[1] not in seen and n[1] not in fetch:
1195 1200 if n[2] in m and n[3] in m:
1196 1201 self.ui.debug(_("found new changeset %s\n") %
1197 1202 short(n[1]))
1198 1203 fetch[n[1]] = 1 # earliest unknown
1199 1204 for p in n[2:4]:
1200 1205 if p in m:
1201 1206 base[p] = 1 # latest known
1202 1207
1203 1208 for p in n[2:4]:
1204 1209 if p not in req and p not in m:
1205 1210 r.append(p)
1206 1211 req[p] = 1
1207 1212 seen[n[0]] = 1
1208 1213
1209 1214 if r:
1210 1215 reqcnt += 1
1211 1216 self.ui.debug(_("request %d: %s\n") %
1212 1217 (reqcnt, " ".join(map(short, r))))
1213 1218 for p in xrange(0, len(r), 10):
1214 1219 for b in remote.branches(r[p:p+10]):
1215 1220 self.ui.debug(_("received %s:%s\n") %
1216 1221 (short(b[0]), short(b[1])))
1217 1222 unknown.append(b)
1218 1223
1219 1224 # do binary search on the branches we found
1220 1225 while search:
1221 1226 n = search.pop(0)
1222 1227 reqcnt += 1
1223 1228 l = remote.between([(n[0], n[1])])[0]
1224 1229 l.append(n[1])
1225 1230 p = n[0]
1226 1231 f = 1
1227 1232 for i in l:
1228 1233 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1229 1234 if i in m:
1230 1235 if f <= 2:
1231 1236 self.ui.debug(_("found new branch changeset %s\n") %
1232 1237 short(p))
1233 1238 fetch[p] = 1
1234 1239 base[i] = 1
1235 1240 else:
1236 1241 self.ui.debug(_("narrowed branch search to %s:%s\n")
1237 1242 % (short(p), short(i)))
1238 1243 search.append((p, i))
1239 1244 break
1240 1245 p, f = i, f * 2
1241 1246
1242 1247 # sanity check our fetch list
1243 1248 for f in fetch.keys():
1244 1249 if f in m:
1245 1250 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1246 1251
1247 1252 if base.keys() == [nullid]:
1248 1253 if force:
1249 1254 self.ui.warn(_("warning: repository is unrelated\n"))
1250 1255 else:
1251 1256 raise util.Abort(_("repository is unrelated"))
1252 1257
1253 1258 self.ui.debug(_("found new changesets starting at ") +
1254 1259 " ".join([short(f) for f in fetch]) + "\n")
1255 1260
1256 1261 self.ui.debug(_("%d total queries\n") % reqcnt)
1257 1262
1258 1263 return fetch.keys()
1259 1264
1260 1265 def findoutgoing(self, remote, base=None, heads=None, force=False):
1261 1266 """Return list of nodes that are roots of subsets not in remote
1262 1267
1263 1268 If base dict is specified, assume that these nodes and their parents
1264 1269 exist on the remote side.
1265 1270 If a list of heads is specified, return only nodes which are heads
1266 1271 or ancestors of these heads, and return a second element which
1267 1272 contains all remote heads which get new children.
1268 1273 """
1269 1274 if base == None:
1270 1275 base = {}
1271 1276 self.findincoming(remote, base, heads, force=force)
1272 1277
1273 1278 self.ui.debug(_("common changesets up to ")
1274 1279 + " ".join(map(short, base.keys())) + "\n")
1275 1280
1276 1281 remain = dict.fromkeys(self.changelog.nodemap)
1277 1282
1278 1283 # prune everything remote has from the tree
1279 1284 del remain[nullid]
1280 1285 remove = base.keys()
1281 1286 while remove:
1282 1287 n = remove.pop(0)
1283 1288 if n in remain:
1284 1289 del remain[n]
1285 1290 for p in self.changelog.parents(n):
1286 1291 remove.append(p)
1287 1292
1288 1293 # find every node whose parents have been pruned
1289 1294 subset = []
1290 1295 # find every remote head that will get new children
1291 1296 updated_heads = {}
1292 1297 for n in remain:
1293 1298 p1, p2 = self.changelog.parents(n)
1294 1299 if p1 not in remain and p2 not in remain:
1295 1300 subset.append(n)
1296 1301 if heads:
1297 1302 if p1 in heads:
1298 1303 updated_heads[p1] = True
1299 1304 if p2 in heads:
1300 1305 updated_heads[p2] = True
1301 1306
1302 1307 # this is the set of all roots we have to push
1303 1308 if heads:
1304 1309 return subset, updated_heads.keys()
1305 1310 else:
1306 1311 return subset
1307 1312
1308 1313 def pull(self, remote, heads=None, force=False, lock=None):
1309 1314 mylock = False
1310 1315 if not lock:
1311 1316 lock = self.lock()
1312 1317 mylock = True
1313 1318
1314 1319 try:
1315 1320 fetch = self.findincoming(remote, force=force)
1316 1321 if fetch == [nullid]:
1317 1322 self.ui.status(_("requesting all changes\n"))
1318 1323
1319 1324 if not fetch:
1320 1325 self.ui.status(_("no changes found\n"))
1321 1326 return 0
1322 1327
1323 1328 if heads is None:
1324 1329 cg = remote.changegroup(fetch, 'pull')
1325 1330 else:
1326 1331 if 'changegroupsubset' not in remote.capabilities:
1327 1332 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1328 1333 cg = remote.changegroupsubset(fetch, heads, 'pull')
1329 1334 return self.addchangegroup(cg, 'pull', remote.url())
1330 1335 finally:
1331 1336 if mylock:
1332 1337 lock.release()
1333 1338
1334 1339 def push(self, remote, force=False, revs=None):
1335 1340 # there are two ways to push to remote repo:
1336 1341 #
1337 1342 # addchangegroup assumes local user can lock remote
1338 1343 # repo (local filesystem, old ssh servers).
1339 1344 #
1340 1345 # unbundle assumes local user cannot lock remote repo (new ssh
1341 1346 # servers, http servers).
1342 1347
1343 1348 if remote.capable('unbundle'):
1344 1349 return self.push_unbundle(remote, force, revs)
1345 1350 return self.push_addchangegroup(remote, force, revs)
1346 1351
1347 1352 def prepush(self, remote, force, revs):
1348 1353 base = {}
1349 1354 remote_heads = remote.heads()
1350 1355 inc = self.findincoming(remote, base, remote_heads, force=force)
1351 1356
1352 1357 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1353 1358 if revs is not None:
1354 1359 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1355 1360 else:
1356 1361 bases, heads = update, self.changelog.heads()
1357 1362
1358 1363 if not bases:
1359 1364 self.ui.status(_("no changes found\n"))
1360 1365 return None, 1
1361 1366 elif not force:
1362 1367 # check if we're creating new remote heads
1363 1368 # to be a remote head after push, node must be either
1364 1369 # - unknown locally
1365 1370 # - a local outgoing head descended from update
1366 1371 # - a remote head that's known locally and not
1367 1372 # ancestral to an outgoing head
1368 1373
1369 1374 warn = 0
1370 1375
1371 1376 if remote_heads == [nullid]:
1372 1377 warn = 0
1373 1378 elif not revs and len(heads) > len(remote_heads):
1374 1379 warn = 1
1375 1380 else:
1376 1381 newheads = list(heads)
1377 1382 for r in remote_heads:
1378 1383 if r in self.changelog.nodemap:
1379 1384 desc = self.changelog.heads(r, heads)
1380 1385 l = [h for h in heads if h in desc]
1381 1386 if not l:
1382 1387 newheads.append(r)
1383 1388 else:
1384 1389 newheads.append(r)
1385 1390 if len(newheads) > len(remote_heads):
1386 1391 warn = 1
1387 1392
1388 1393 if warn:
1389 1394 self.ui.warn(_("abort: push creates new remote branches!\n"))
1390 1395 self.ui.status(_("(did you forget to merge?"
1391 1396 " use push -f to force)\n"))
1392 1397 return None, 1
1393 1398 elif inc:
1394 1399 self.ui.warn(_("note: unsynced remote changes!\n"))
1395 1400
1396 1401
1397 1402 if revs is None:
1398 1403 cg = self.changegroup(update, 'push')
1399 1404 else:
1400 1405 cg = self.changegroupsubset(update, revs, 'push')
1401 1406 return cg, remote_heads
1402 1407
1403 1408 def push_addchangegroup(self, remote, force, revs):
1404 1409 lock = remote.lock()
1405 1410
1406 1411 ret = self.prepush(remote, force, revs)
1407 1412 if ret[0] is not None:
1408 1413 cg, remote_heads = ret
1409 1414 return remote.addchangegroup(cg, 'push', self.url())
1410 1415 return ret[1]
1411 1416
1412 1417 def push_unbundle(self, remote, force, revs):
1413 1418 # local repo finds heads on server, finds out what revs it
1414 1419 # must push. once revs transferred, if server finds it has
1415 1420 # different heads (someone else won commit/push race), server
1416 1421 # aborts.
1417 1422
1418 1423 ret = self.prepush(remote, force, revs)
1419 1424 if ret[0] is not None:
1420 1425 cg, remote_heads = ret
1421 1426 if force: remote_heads = ['force']
1422 1427 return remote.unbundle(cg, remote_heads, 'push')
1423 1428 return ret[1]
1424 1429
1425 1430 def changegroupinfo(self, nodes):
1426 1431 self.ui.note(_("%d changesets found\n") % len(nodes))
1427 1432 if self.ui.debugflag:
1428 1433 self.ui.debug(_("List of changesets:\n"))
1429 1434 for node in nodes:
1430 1435 self.ui.debug("%s\n" % hex(node))
1431 1436
1432 1437 def changegroupsubset(self, bases, heads, source):
1433 1438 """This function generates a changegroup consisting of all the nodes
1434 1439 that are descendants of any of the bases, and ancestors of any of
1435 1440 the heads.
1436 1441
1437 1442 It is fairly complex as determining which filenodes and which
1438 1443 manifest nodes need to be included for the changeset to be complete
1439 1444 is non-trivial.
1440 1445
1441 1446 Another wrinkle is doing the reverse, figuring out which changeset in
1442 1447 the changegroup a particular filenode or manifestnode belongs to."""
1443 1448
1444 1449 self.hook('preoutgoing', throw=True, source=source)
1445 1450
1446 1451 # Set up some initial variables
1447 1452 # Make it easy to refer to self.changelog
1448 1453 cl = self.changelog
1449 1454 # msng is short for missing - compute the list of changesets in this
1450 1455 # changegroup.
1451 1456 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1452 1457 self.changegroupinfo(msng_cl_lst)
1453 1458 # Some bases may turn out to be superfluous, and some heads may be
1454 1459 # too. nodesbetween will return the minimal set of bases and heads
1455 1460 # necessary to re-create the changegroup.
1456 1461
1457 1462 # Known heads are the list of heads that it is assumed the recipient
1458 1463 # of this changegroup will know about.
1459 1464 knownheads = {}
1460 1465 # We assume that all parents of bases are known heads.
1461 1466 for n in bases:
1462 1467 for p in cl.parents(n):
1463 1468 if p != nullid:
1464 1469 knownheads[p] = 1
1465 1470 knownheads = knownheads.keys()
1466 1471 if knownheads:
1467 1472 # Now that we know what heads are known, we can compute which
1468 1473 # changesets are known. The recipient must know about all
1469 1474 # changesets required to reach the known heads from the null
1470 1475 # changeset.
1471 1476 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1472 1477 junk = None
1473 1478 # Transform the list into an ersatz set.
1474 1479 has_cl_set = dict.fromkeys(has_cl_set)
1475 1480 else:
1476 1481 # If there were no known heads, the recipient cannot be assumed to
1477 1482 # know about any changesets.
1478 1483 has_cl_set = {}
1479 1484
1480 1485 # Make it easy to refer to self.manifest
1481 1486 mnfst = self.manifest
1482 1487 # We don't know which manifests are missing yet
1483 1488 msng_mnfst_set = {}
1484 1489 # Nor do we know which filenodes are missing.
1485 1490 msng_filenode_set = {}
1486 1491
1487 1492 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1488 1493 junk = None
1489 1494
1490 1495 # A changeset always belongs to itself, so the changenode lookup
1491 1496 # function for a changenode is identity.
1492 1497 def identity(x):
1493 1498 return x
1494 1499
1495 1500 # A function generating function. Sets up an environment for the
1496 1501 # inner function.
1497 1502 def cmp_by_rev_func(revlog):
1498 1503 # Compare two nodes by their revision number in the environment's
1499 1504 # revision history. Since the revision number both represents the
1500 1505 # most efficient order to read the nodes in, and represents a
1501 1506 # topological sorting of the nodes, this function is often useful.
1502 1507 def cmp_by_rev(a, b):
1503 1508 return cmp(revlog.rev(a), revlog.rev(b))
1504 1509 return cmp_by_rev
1505 1510
1506 1511 # If we determine that a particular file or manifest node must be a
1507 1512 # node that the recipient of the changegroup will already have, we can
1508 1513 # also assume the recipient will have all the parents. This function
1509 1514 # prunes them from the set of missing nodes.
1510 1515 def prune_parents(revlog, hasset, msngset):
1511 1516 haslst = hasset.keys()
1512 1517 haslst.sort(cmp_by_rev_func(revlog))
1513 1518 for node in haslst:
1514 1519 parentlst = [p for p in revlog.parents(node) if p != nullid]
1515 1520 while parentlst:
1516 1521 n = parentlst.pop()
1517 1522 if n not in hasset:
1518 1523 hasset[n] = 1
1519 1524 p = [p for p in revlog.parents(n) if p != nullid]
1520 1525 parentlst.extend(p)
1521 1526 for n in hasset:
1522 1527 msngset.pop(n, None)
1523 1528
1524 1529 # This is a function generating function used to set up an environment
1525 1530 # for the inner function to execute in.
1526 1531 def manifest_and_file_collector(changedfileset):
1527 1532 # This is an information gathering function that gathers
1528 1533 # information from each changeset node that goes out as part of
1529 1534 # the changegroup. The information gathered is a list of which
1530 1535 # manifest nodes are potentially required (the recipient may
1531 1536 # already have them) and total list of all files which were
1532 1537 # changed in any changeset in the changegroup.
1533 1538 #
1534 1539 # We also remember the first changenode we saw any manifest
1535 1540 # referenced by so we can later determine which changenode 'owns'
1536 1541 # the manifest.
1537 1542 def collect_manifests_and_files(clnode):
1538 1543 c = cl.read(clnode)
1539 1544 for f in c[3]:
1540 1545 # This is to make sure we only have one instance of each
1541 1546 # filename string for each filename.
1542 1547 changedfileset.setdefault(f, f)
1543 1548 msng_mnfst_set.setdefault(c[0], clnode)
1544 1549 return collect_manifests_and_files
1545 1550
1546 1551 # Figure out which manifest nodes (of the ones we think might be part
1547 1552 # of the changegroup) the recipient must know about and remove them
1548 1553 # from the changegroup.
1549 1554 def prune_manifests():
1550 1555 has_mnfst_set = {}
1551 1556 for n in msng_mnfst_set:
1552 1557 # If a 'missing' manifest thinks it belongs to a changenode
1553 1558 # the recipient is assumed to have, obviously the recipient
1554 1559 # must have that manifest.
1555 1560 linknode = cl.node(mnfst.linkrev(n))
1556 1561 if linknode in has_cl_set:
1557 1562 has_mnfst_set[n] = 1
1558 1563 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1559 1564
1560 1565 # Use the information collected in collect_manifests_and_files to say
1561 1566 # which changenode any manifestnode belongs to.
1562 1567 def lookup_manifest_link(mnfstnode):
1563 1568 return msng_mnfst_set[mnfstnode]
1564 1569
1565 1570 # A function generating function that sets up the initial environment
1566 1571 # for the inner function.
1567 1572 def filenode_collector(changedfiles):
1568 1573 next_rev = [0]
1569 1574 # This gathers information from each manifestnode included in the
1570 1575 # changegroup about which filenodes the manifest node references
1571 1576 # so we can include those in the changegroup too.
1572 1577 #
1573 1578 # It also remembers which changenode each filenode belongs to. It
1574 1579 # does this by assuming a filenode belongs to the changenode
1575 1580 # that the first manifest referencing it belongs to.
1576 1581 def collect_msng_filenodes(mnfstnode):
1577 1582 r = mnfst.rev(mnfstnode)
1578 1583 if r == next_rev[0]:
1579 1584 # If the last rev we looked at was the one just previous,
1580 1585 # we only need to see a diff.
1581 1586 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1582 1587 # For each line in the delta
1583 1588 for dline in delta.splitlines():
1584 1589 # get the filename and filenode for that line
1585 1590 f, fnode = dline.split('\0')
1586 1591 fnode = bin(fnode[:40])
1587 1592 f = changedfiles.get(f, None)
1588 1593 # And if the file is in the list of files we care
1589 1594 # about.
1590 1595 if f is not None:
1591 1596 # Get the changenode this manifest belongs to
1592 1597 clnode = msng_mnfst_set[mnfstnode]
1593 1598 # Create the set of filenodes for the file if
1594 1599 # there isn't one already.
1595 1600 ndset = msng_filenode_set.setdefault(f, {})
1596 1601 # And set the filenode's changelog node to the
1597 1602 # manifest's if it hasn't been set already.
1598 1603 ndset.setdefault(fnode, clnode)
1599 1604 else:
1600 1605 # Otherwise we need a full manifest.
1601 1606 m = mnfst.read(mnfstnode)
1602 1607 # For every file we care about.
1603 1608 for f in changedfiles:
1604 1609 fnode = m.get(f, None)
1605 1610 # If it's in the manifest
1606 1611 if fnode is not None:
1607 1612 # See comments above.
1608 1613 clnode = msng_mnfst_set[mnfstnode]
1609 1614 ndset = msng_filenode_set.setdefault(f, {})
1610 1615 ndset.setdefault(fnode, clnode)
1611 1616 # Remember the revision we hope to see next.
1612 1617 next_rev[0] = r + 1
1613 1618 return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
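            # The stream produced below is a sequence of delta groups: first
            # the changelog group, then the manifest group, then, for each
            # changed file, a chunk naming the file followed by that file's
            # delta group.  A final empty chunk (closechunk) signals the end
            # of the stream.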
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

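        # For the changelog, a node is its own link node, so the group
        # lookup function is simply the identity.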
        def identity(x):
            return x

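        # Walk a revlog in revision order and yield only the nodes that were
        # introduced by one of the outgoing changesets, i.e. those whose
        # linkrev is in revset.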
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
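        #   e.g. 2 heads -> 1 head  returns -2 (one head removed)
        #        1 head  -> 1 head  returns  1 (same number of heads)
        #        1 head  -> 3 heads returns  3 (two heads added)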
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
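        # The stream_out wire format, as consumed below: first a status line
        # (0 = ok, 1 = operation forbidden, 2 = remote locking failed), then
        # a '<total files> <total bytes>' line, then for each file a
        # '<name>\0<size>' header followed by exactly <size> bytes of raw
        # store data.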
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
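        # Never return 0 here (cf. addchangegroup), so callers can tell
        # that data was received.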
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True