rename and simplify do_lock
Matt Mackall
r4913:46e39935 default
@@ -1,1952 +1,1950 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util, extensions, hook
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.path = path
24 24 self.root = os.path.realpath(path)
25 25 self.path = os.path.join(self.root, ".hg")
26 26 self.origroot = path
27 27 self.opener = util.opener(self.path)
28 28 self.wopener = util.opener(self.root)
29 29
30 30 if not os.path.isdir(self.path):
31 31 if create:
32 32 if not os.path.exists(path):
33 33 os.mkdir(path)
34 34 os.mkdir(self.path)
35 35 requirements = ["revlogv1"]
36 36 if parentui.configbool('format', 'usestore', True):
37 37 os.mkdir(os.path.join(self.path, "store"))
38 38 requirements.append("store")
39 39 # create an invalid changelog
40 40 self.opener("00changelog.i", "a").write(
41 41 '\0\0\0\2' # represents revlogv2
42 42 ' dummy changelog to prevent using the old repo layout'
43 43 )
44 44 reqfile = self.opener("requires", "w")
45 45 for r in requirements:
46 46 reqfile.write("%s\n" % r)
47 47 reqfile.close()
48 48 else:
49 49 raise repo.RepoError(_("repository %s not found") % path)
50 50 elif create:
51 51 raise repo.RepoError(_("repository %s already exists") % path)
52 52 else:
53 53 # find requirements
54 54 try:
55 55 requirements = self.opener("requires").read().splitlines()
56 56 except IOError, inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 requirements = []
60 60 # check them
61 61 for r in requirements:
62 62 if r not in self.supported:
63 63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64 64
65 65 # setup store
66 66 if "store" in requirements:
67 67 self.encodefn = util.encodefilename
68 68 self.decodefn = util.decodefilename
69 69 self.spath = os.path.join(self.path, "store")
70 70 else:
71 71 self.encodefn = lambda x: x
72 72 self.decodefn = lambda x: x
73 73 self.spath = self.path
74 74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75 75
76 76 self.ui = ui.ui(parentui=parentui)
77 77 try:
78 78 self.ui.readconfig(self.join("hgrc"), self.root)
79 79 extensions.loadall(self.ui)
80 80 except IOError:
81 81 pass
82 82
83 83 self.tagscache = None
84 84 self.branchcache = None
85 85 self.nodetagscache = None
86 86 self.filterpats = {}
87 87 self.transhandle = None
88 88
89 89 def __getattr__(self, name):
90 90 if name == 'changelog':
91 91 self.changelog = changelog.changelog(self.sopener)
92 92 self.sopener.defversion = self.changelog.version
93 93 return self.changelog
94 94 if name == 'manifest':
95 95 self.changelog # force the changelog to load first so sopener.defversion is set
96 96 self.manifest = manifest.manifest(self.sopener)
97 97 return self.manifest
98 98 if name == 'dirstate':
99 99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
100 100 return self.dirstate
101 101 else:
102 102 raise AttributeError, name
103 103
104 104 def url(self):
105 105 return 'file:' + self.root
106 106
107 107 def hook(self, name, throw=False, **args):
108 108 return hook.hook(self.ui, self, name, throw, **args)
109 109
110 110 tag_disallowed = ':\r\n'
111 111
112 112 def _tag(self, name, node, message, local, user, date, parent=None,
113 113 extra={}):
114 114 use_dirstate = parent is None
115 115
116 116 for c in self.tag_disallowed:
117 117 if c in name:
118 118 raise util.Abort(_('%r cannot be used in a tag name') % c)
119 119
120 120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
121 121
122 122 def writetag(fp, name, munge, prevtags):
123 123 if prevtags and prevtags[-1] != '\n':
124 124 fp.write('\n')
125 125 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
126 126 fp.close()
127 127 self.hook('tag', node=hex(node), tag=name, local=local)
128 128
129 129 prevtags = ''
130 130 if local:
131 131 try:
132 132 fp = self.opener('localtags', 'r+')
133 133 except IOError, err:
134 134 fp = self.opener('localtags', 'a')
135 135 else:
136 136 prevtags = fp.read()
137 137
138 138 # local tags are stored in the current charset
139 139 writetag(fp, name, None, prevtags)
140 140 return
141 141
142 142 if use_dirstate:
143 143 try:
144 144 fp = self.wfile('.hgtags', 'rb+')
145 145 except IOError, err:
146 146 fp = self.wfile('.hgtags', 'ab')
147 147 else:
148 148 prevtags = fp.read()
149 149 else:
150 150 try:
151 151 prevtags = self.filectx('.hgtags', parent).data()
152 152 except revlog.LookupError:
153 153 pass
154 154 fp = self.wfile('.hgtags', 'wb')
155 155
156 156 # committed tags are stored in UTF-8
157 157 writetag(fp, name, util.fromlocal, prevtags)
158 158
159 159 if use_dirstate and '.hgtags' not in self.dirstate:
160 160 self.add(['.hgtags'])
161 161
162 162 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
163 163 extra=extra)
164 164
165 165 self.hook('tag', node=hex(node), tag=name, local=local)
166 166
167 167 return tagnode
168 168
169 169 def tag(self, name, node, message, local, user, date):
170 170 '''tag a revision with a symbolic name.
171 171
172 172 if local is True, the tag is stored in a per-repository file.
173 173 otherwise, it is stored in the .hgtags file, and a new
174 174 changeset is committed with the change.
175 175
176 176 keyword arguments:
177 177
178 178 local: whether to store tag in non-version-controlled file
179 179 (default False)
180 180
181 181 message: commit message to use if committing
182 182
183 183 user: name of user to use if committing
184 184
185 185 date: date tuple to use if committing'''
186 186
187 187 for x in self.status()[:5]:
188 188 if '.hgtags' in x:
189 189 raise util.Abort(_('working copy of .hgtags is changed '
190 190 '(please commit .hgtags manually)'))
191 191
192 192
193 193 self._tag(name, node, message, local, user, date)
194 194
195 195 def tags(self):
196 196 '''return a mapping of tag to node'''
197 197 if self.tagscache:
198 198 return self.tagscache
199 199
200 200 globaltags = {}
201 201
202 202 def readtags(lines, fn):
203 203 filetags = {}
204 204 count = 0
205 205
206 206 def warn(msg):
207 207 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
208 208
209 209 for l in lines:
210 210 count += 1
211 211 if not l:
212 212 continue
213 213 s = l.split(" ", 1)
214 214 if len(s) != 2:
215 215 warn(_("cannot parse entry"))
216 216 continue
217 217 node, key = s
218 218 key = util.tolocal(key.strip()) # stored in UTF-8
219 219 try:
220 220 bin_n = bin(node)
221 221 except TypeError:
222 222 warn(_("node '%s' is not well formed") % node)
223 223 continue
224 224 if bin_n not in self.changelog.nodemap:
225 225 warn(_("tag '%s' refers to unknown node") % key)
226 226 continue
227 227
228 228 h = []
229 229 if key in filetags:
230 230 n, h = filetags[key]
231 231 h.append(n)
232 232 filetags[key] = (bin_n, h)
233 233
234 234 for k, nh in filetags.items():
235 235 if k not in globaltags:
236 236 globaltags[k] = nh
237 237 continue
238 238 # we prefer the global tag if:
239 239 # it supersedes us OR
240 240 # we supersede each other and it has a higher rank
241 241 # otherwise we win because we're tip-most
242 242 an, ah = nh
243 243 bn, bh = globaltags[k]
244 244 if (bn != an and an in bh and
245 245 (bn not in ah or len(bh) > len(ah))):
246 246 an = bn
247 247 ah.extend([n for n in bh if n not in ah])
248 248 globaltags[k] = an, ah
249 249
250 250 # read the tags file from each head, ending with the tip
251 251 f = None
252 252 for rev, node, fnode in self._hgtagsnodes():
253 253 f = (f and f.filectx(fnode) or
254 254 self.filectx('.hgtags', fileid=fnode))
255 255 readtags(f.data().splitlines(), f)
256 256
257 257 try:
258 258 data = util.fromlocal(self.opener("localtags").read())
259 259 # localtags are stored in the local character set
260 260 # while the internal tag table is stored in UTF-8
261 261 readtags(data.splitlines(), "localtags")
262 262 except IOError:
263 263 pass
264 264
265 265 self.tagscache = {}
266 266 for k,nh in globaltags.items():
267 267 n = nh[0]
268 268 if n != nullid:
269 269 self.tagscache[k] = n
270 270 self.tagscache['tip'] = self.changelog.tip()
271 271
272 272 return self.tagscache
273 273
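A minimal sketch of the merge rule in readtags above, with the strings 'a' and 'b' standing in for binary node ids (hypothetical data, not part of this changeset): the per-file entry from the tip-most head loses only when the existing global entry supersedes it.

    def prefer_global(an, ah, bn, bh):
        # (an, ah): entry from the tip-most .hgtags; (bn, bh): global entry
        if bn != an and an in bh and (bn not in ah or len(bh) > len(ah)):
            an = bn                  # the global tag supersedes us
        ah = ah + [n for n in bh if n not in ah]
        return an, ah

    # the global entry 'b' already lists 'a' as superseded, so 'b' wins:
    print(prefer_global('a', [], 'b', ['a']))   # -> ('b', ['a'])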
274 274 def _hgtagsnodes(self):
275 275 heads = self.heads()
276 276 heads.reverse()
277 277 last = {}
278 278 ret = []
279 279 for node in heads:
280 280 c = self.changectx(node)
281 281 rev = c.rev()
282 282 try:
283 283 fnode = c.filenode('.hgtags')
284 284 except revlog.LookupError:
285 285 continue
286 286 ret.append((rev, node, fnode))
287 287 if fnode in last:
288 288 ret[last[fnode]] = None
289 289 last[fnode] = len(ret) - 1
290 290 return [item for item in ret if item]
291 291
292 292 def tagslist(self):
293 293 '''return a list of tags ordered by revision'''
294 294 l = []
295 295 for t, n in self.tags().items():
296 296 try:
297 297 r = self.changelog.rev(n)
298 298 except:
299 299 r = -2 # sort to the beginning of the list if unknown
300 300 l.append((r, t, n))
301 301 l.sort()
302 302 return [(t, n) for r, t, n in l]
303 303
304 304 def nodetags(self, node):
305 305 '''return the tags associated with a node'''
306 306 if not self.nodetagscache:
307 307 self.nodetagscache = {}
308 308 for t, n in self.tags().items():
309 309 self.nodetagscache.setdefault(n, []).append(t)
310 310 return self.nodetagscache.get(node, [])
311 311
312 312 def _branchtags(self):
313 313 partial, last, lrev = self._readbranchcache()
314 314
315 315 tiprev = self.changelog.count() - 1
316 316 if lrev != tiprev:
317 317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319 319
320 320 return partial
321 321
322 322 def branchtags(self):
323 323 if self.branchcache is not None:
324 324 return self.branchcache
325 325
326 326 self.branchcache = {} # avoid recursion in changectx
327 327 partial = self._branchtags()
328 328
329 329 # the branch cache is stored on disk as UTF-8, but in the local
330 330 # charset internally
331 331 for k, v in partial.items():
332 332 self.branchcache[util.tolocal(k)] = v
333 333 return self.branchcache
334 334
335 335 def _readbranchcache(self):
336 336 partial = {}
337 337 try:
338 338 f = self.opener("branch.cache")
339 339 lines = f.read().split('\n')
340 340 f.close()
341 341 except (IOError, OSError):
342 342 return {}, nullid, nullrev
343 343
344 344 try:
345 345 last, lrev = lines.pop(0).split(" ", 1)
346 346 last, lrev = bin(last), int(lrev)
347 347 if not (lrev < self.changelog.count() and
348 348 self.changelog.node(lrev) == last): # sanity check
349 349 # invalidate the cache
350 350 raise ValueError('Invalid branch cache: unknown tip')
351 351 for l in lines:
352 352 if not l: continue
353 353 node, label = l.split(" ", 1)
354 354 partial[label.strip()] = bin(node)
355 355 except (KeyboardInterrupt, util.SignalInterrupt):
356 356 raise
357 357 except Exception, inst:
358 358 if self.ui.debugflag:
359 359 self.ui.warn(str(inst), '\n')
360 360 partial, last, lrev = {}, nullid, nullrev
361 361 return partial, last, lrev
362 362
363 363 def _writebranchcache(self, branches, tip, tiprev):
364 364 try:
365 365 f = self.opener("branch.cache", "w", atomictemp=True)
366 366 f.write("%s %s\n" % (hex(tip), tiprev))
367 367 for label, node in branches.iteritems():
368 368 f.write("%s %s\n" % (hex(node), label))
369 369 f.rename()
370 370 except (IOError, OSError):
371 371 pass
372 372
373 373 def _updatebranchcache(self, partial, start, end):
374 374 for r in xrange(start, end):
375 375 c = self.changectx(r)
376 376 b = c.branch()
377 377 partial[b] = c.node()
378 378
379 379 def lookup(self, key):
380 380 if key == '.':
381 381 key, second = self.dirstate.parents()
382 382 if key == nullid:
383 383 raise repo.RepoError(_("no revision checked out"))
384 384 if second != nullid:
385 385 self.ui.warn(_("warning: working directory has two parents, "
386 386 "tag '.' uses the first\n"))
387 387 elif key == 'null':
388 388 return nullid
389 389 n = self.changelog._match(key)
390 390 if n:
391 391 return n
392 392 if key in self.tags():
393 393 return self.tags()[key]
394 394 if key in self.branchtags():
395 395 return self.branchtags()[key]
396 396 n = self.changelog._partialmatch(key)
397 397 if n:
398 398 return n
399 399 raise repo.RepoError(_("unknown revision '%s'") % key)
400 400
401 401 def dev(self):
402 402 return os.lstat(self.path).st_dev
403 403
404 404 def local(self):
405 405 return True
406 406
407 407 def join(self, f):
408 408 return os.path.join(self.path, f)
409 409
410 410 def sjoin(self, f):
411 411 f = self.encodefn(f)
412 412 return os.path.join(self.spath, f)
413 413
414 414 def wjoin(self, f):
415 415 return os.path.join(self.root, f)
416 416
417 417 def file(self, f):
418 418 if f[0] == '/':
419 419 f = f[1:]
420 420 return filelog.filelog(self.sopener, f)
421 421
422 422 def changectx(self, changeid=None):
423 423 return context.changectx(self, changeid)
424 424
425 425 def workingctx(self):
426 426 return context.workingctx(self)
427 427
428 428 def parents(self, changeid=None):
429 429 '''
430 430 get list of changectxs for parents of changeid or working directory
431 431 '''
432 432 if changeid is None:
433 433 pl = self.dirstate.parents()
434 434 else:
435 435 n = self.changelog.lookup(changeid)
436 436 pl = self.changelog.parents(n)
437 437 if pl[1] == nullid:
438 438 return [self.changectx(pl[0])]
439 439 return [self.changectx(pl[0]), self.changectx(pl[1])]
440 440
441 441 def filectx(self, path, changeid=None, fileid=None):
442 442 """changeid can be a changeset revision, node, or tag.
443 443 fileid can be a file revision or node."""
444 444 return context.filectx(self, path, changeid, fileid)
445 445
446 446 def getcwd(self):
447 447 return self.dirstate.getcwd()
448 448
449 449 def pathto(self, f, cwd=None):
450 450 return self.dirstate.pathto(f, cwd)
451 451
452 452 def wfile(self, f, mode='r'):
453 453 return self.wopener(f, mode)
454 454
455 455 def _link(self, f):
456 456 return os.path.islink(self.wjoin(f))
457 457
458 458 def _filter(self, filter, filename, data):
459 459 if filter not in self.filterpats:
460 460 l = []
461 461 for pat, cmd in self.ui.configitems(filter):
462 462 mf = util.matcher(self.root, "", [pat], [], [])[1]
463 463 l.append((mf, cmd))
464 464 self.filterpats[filter] = l
465 465
466 466 for mf, cmd in self.filterpats[filter]:
467 467 if mf(filename):
468 468 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
469 469 data = util.filter(data, cmd)
470 470 break
471 471
472 472 return data
473 473
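For context, the [encode] and [decode] hgrc sections that _filter reads map file patterns to shell filter commands; the gzip pair below follows the stock example from the hgrc documentation (illustrative only):

    [encode]
    # uncompress gzip files on checkin so revlog deltas compress well
    *.gz = gunzip
    [decode]
    # recompress gzip files when writing to the working directory
    *.gz = gzip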
474 474 def wread(self, filename):
475 475 if self._link(filename):
476 476 data = os.readlink(self.wjoin(filename))
477 477 else:
478 478 data = self.wopener(filename, 'r').read()
479 479 return self._filter("encode", filename, data)
480 480
481 481 def wwrite(self, filename, data, flags):
482 482 data = self._filter("decode", filename, data)
483 483 if "l" in flags:
484 484 self.wopener.symlink(data, filename)
485 485 else:
486 486 try:
487 487 if self._link(filename):
488 488 os.unlink(self.wjoin(filename))
489 489 except OSError:
490 490 pass
491 491 self.wopener(filename, 'w').write(data)
492 492 util.set_exec(self.wjoin(filename), "x" in flags)
493 493
494 494 def wwritedata(self, filename, data):
495 495 return self._filter("decode", filename, data)
496 496
497 497 def transaction(self):
498 498 tr = self.transhandle
499 499 if tr != None and tr.running():
500 500 return tr.nest()
501 501
502 502 # save dirstate for rollback
503 503 try:
504 504 ds = self.opener("dirstate").read()
505 505 except IOError:
506 506 ds = ""
507 507 self.opener("journal.dirstate", "w").write(ds)
508 508
509 509 renames = [(self.sjoin("journal"), self.sjoin("undo")),
510 510 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
511 511 tr = transaction.transaction(self.ui.warn, self.sopener,
512 512 self.sjoin("journal"),
513 513 aftertrans(renames))
514 514 self.transhandle = tr
515 515 return tr
516 516
517 517 def recover(self):
518 518 l = self.lock()
519 519 if os.path.exists(self.sjoin("journal")):
520 520 self.ui.status(_("rolling back interrupted transaction\n"))
521 521 transaction.rollback(self.sopener, self.sjoin("journal"))
522 522 self.invalidate()
523 523 return True
524 524 else:
525 525 self.ui.warn(_("no interrupted transaction available\n"))
526 526 return False
527 527
528 528 def rollback(self, wlock=None, lock=None):
529 529 if not wlock:
530 530 wlock = self.wlock()
531 531 if not lock:
532 532 lock = self.lock()
533 533 if os.path.exists(self.sjoin("undo")):
534 534 self.ui.status(_("rolling back last transaction\n"))
535 535 transaction.rollback(self.sopener, self.sjoin("undo"))
536 536 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
537 537 self.invalidate()
538 538 self.dirstate.invalidate()
539 539 else:
540 540 self.ui.warn(_("no rollback information available\n"))
541 541
542 542 def invalidate(self):
543 543 for a in "changelog manifest".split():
544 544 if hasattr(self, a):
545 545 self.__delattr__(a)
546 546 self.tagscache = None
547 547 self.nodetagscache = None
548 548
549 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
550 desc=None):
549 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
551 550 try:
552 551 l = lock.lock(lockname, 0, releasefn, desc=desc)
553 552 except lock.LockHeld, inst:
554 553 if not wait:
555 554 raise
556 555 self.ui.warn(_("waiting for lock on %s held by %r\n") %
557 556 (desc, inst.locker))
558 557 # default to 600 seconds timeout
559 558 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
560 559 releasefn, desc=desc)
561 560 if acquirefn:
562 561 acquirefn()
563 562 return l
564 563
565 564 def lock(self, wait=1):
566 return self.do_lock(self.sjoin("lock"), wait,
567 acquirefn=self.invalidate,
568 desc=_('repository %s') % self.origroot)
565 return self._lock(self.sjoin("lock"), wait, None, self.invalidate,
566 _('repository %s') % self.origroot)
569 567
570 568 def wlock(self, wait=1):
571 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
572 self.dirstate.invalidate,
573 desc=_('working directory of %s') % self.origroot)
569 return self._lock(self.join("wlock"), wait, self.dirstate.write,
570 self.dirstate.invalidate,
571 _('working directory of %s') % self.origroot)
574 572
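The change above folds do_lock's keyword arguments into fixed positional ones and leaves lock() and wlock() as the only entry points. A minimal sketch of a caller under the unchanged external API (try/finally form assumed; release() is used the same way on wlock in status() below):

    l = repo.lock()      # store lock; invalidate() runs on acquire
    try:
        pass             # ... mutate repository history ...
    finally:
        l.release()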
575 573 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
576 574 """
577 575 commit an individual file as part of a larger transaction
578 576 """
579 577
580 578 t = self.wread(fn)
581 579 fl = self.file(fn)
582 580 fp1 = manifest1.get(fn, nullid)
583 581 fp2 = manifest2.get(fn, nullid)
584 582
585 583 meta = {}
586 584 cp = self.dirstate.copied(fn)
587 585 if cp:
588 586 # Mark the new revision of this file as a copy of another
589 587 # file. This copy data will effectively act as a parent
590 588 # of this new revision. If this is a merge, the first
591 589 # parent will be the nullid (meaning "look up the copy data")
592 590 # and the second one will be the other parent. For example:
593 591 #
594 592 # 0 --- 1 --- 3 rev1 changes file foo
595 593 # \ / rev2 renames foo to bar and changes it
596 594 # \- 2 -/ rev3 should have bar with all changes and
597 595 # should record that bar descends from
598 596 # bar in rev2 and foo in rev1
599 597 #
600 598 # this allows this merge to succeed:
601 599 #
602 600 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
603 601 # \ / merging rev3 and rev4 should use bar@rev2
604 602 # \- 2 --- 4 as the merge base
605 603 #
606 604 meta["copy"] = cp
607 605 if not manifest2: # not a branch merge
608 606 meta["copyrev"] = hex(manifest1.get(cp, nullid))
609 607 fp2 = nullid
610 608 elif fp2 != nullid: # copied on remote side
611 609 meta["copyrev"] = hex(manifest1.get(cp, nullid))
612 610 elif fp1 != nullid: # copied on local side, reversed
613 611 meta["copyrev"] = hex(manifest2.get(cp))
614 612 fp2 = fp1
615 613 else: # directory rename
616 614 meta["copyrev"] = hex(manifest1.get(cp, nullid))
617 615 self.ui.debug(_(" %s: copy %s:%s\n") %
618 616 (fn, cp, meta["copyrev"]))
619 617 fp1 = nullid
620 618 elif fp2 != nullid:
621 619 # is one parent an ancestor of the other?
622 620 fpa = fl.ancestor(fp1, fp2)
623 621 if fpa == fp1:
624 622 fp1, fp2 = fp2, nullid
625 623 elif fpa == fp2:
626 624 fp2 = nullid
627 625
628 626 # is the file unmodified from the parent? report existing entry
629 627 if fp2 == nullid and not fl.cmp(fp1, t):
630 628 return fp1
631 629
632 630 changelist.append(fn)
633 631 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
634 632
635 633 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
636 634 if p1 is None:
637 635 p1, p2 = self.dirstate.parents()
638 636 return self.commit(files=files, text=text, user=user, date=date,
639 637 p1=p1, p2=p2, wlock=wlock, extra=extra)
640 638
641 639 def commit(self, files=None, text="", user=None, date=None,
642 640 match=util.always, force=False, lock=None, wlock=None,
643 641 force_editor=False, p1=None, p2=None, extra={}):
644 642
645 643 commit = []
646 644 remove = []
647 645 changed = []
648 646 use_dirstate = (p1 is None) # not rawcommit
649 647 extra = extra.copy()
650 648
651 649 if use_dirstate:
652 650 if files:
653 651 for f in files:
654 652 s = self.dirstate[f]
655 653 if s in 'nma':
656 654 commit.append(f)
657 655 elif s == 'r':
658 656 remove.append(f)
659 657 else:
660 658 self.ui.warn(_("%s not tracked!\n") % f)
661 659 else:
662 660 changes = self.status(match=match)[:5]
663 661 modified, added, removed, deleted, unknown = changes
664 662 commit = modified + added
665 663 remove = removed
666 664 else:
667 665 commit = files
668 666
669 667 if use_dirstate:
670 668 p1, p2 = self.dirstate.parents()
671 669 update_dirstate = True
672 670 else:
673 671 p1, p2 = p1, p2 or nullid
674 672 update_dirstate = (self.dirstate.parents()[0] == p1)
675 673
676 674 c1 = self.changelog.read(p1)
677 675 c2 = self.changelog.read(p2)
678 676 m1 = self.manifest.read(c1[0]).copy()
679 677 m2 = self.manifest.read(c2[0])
680 678
681 679 if use_dirstate:
682 680 branchname = self.workingctx().branch()
683 681 try:
684 682 branchname = branchname.decode('UTF-8').encode('UTF-8')
685 683 except UnicodeDecodeError:
686 684 raise util.Abort(_('branch name not in UTF-8!'))
687 685 else:
688 686 branchname = ""
689 687
690 688 if use_dirstate:
691 689 oldname = c1[5].get("branch") # stored in UTF-8
692 690 if (not commit and not remove and not force and p2 == nullid
693 691 and branchname == oldname):
694 692 self.ui.status(_("nothing changed\n"))
695 693 return None
696 694
697 695 xp1 = hex(p1)
698 696 if p2 == nullid: xp2 = ''
699 697 else: xp2 = hex(p2)
700 698
701 699 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
702 700
703 701 if not wlock:
704 702 wlock = self.wlock()
705 703 if not lock:
706 704 lock = self.lock()
707 705 tr = self.transaction()
708 706
709 707 # check in files
710 708 new = {}
711 709 linkrev = self.changelog.count()
712 710 commit.sort()
713 711 is_exec = util.execfunc(self.root, m1.execf)
714 712 is_link = util.linkfunc(self.root, m1.linkf)
715 713 for f in commit:
716 714 self.ui.note(f + "\n")
717 715 try:
718 716 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
719 717 new_exec = is_exec(f)
720 718 new_link = is_link(f)
721 719 if not changed or changed[-1] != f:
722 720 # mention the file in the changelog if some flag changed,
723 721 # even if there was no content change.
724 722 old_exec = m1.execf(f)
725 723 old_link = m1.linkf(f)
726 724 if old_exec != new_exec or old_link != new_link:
727 725 changed.append(f)
728 726 m1.set(f, new_exec, new_link)
729 727 except (OSError, IOError):
730 728 if use_dirstate:
731 729 self.ui.warn(_("trouble committing %s!\n") % f)
732 730 raise
733 731 else:
734 732 remove.append(f)
735 733
736 734 # update manifest
737 735 m1.update(new)
738 736 remove.sort()
739 737 removed = []
740 738
741 739 for f in remove:
742 740 if f in m1:
743 741 del m1[f]
744 742 removed.append(f)
745 743 elif f in m2:
746 744 removed.append(f)
747 745 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
748 746
749 747 # add changeset
750 748 new = new.keys()
751 749 new.sort()
752 750
753 751 user = user or self.ui.username()
754 752 if not text or force_editor:
755 753 edittext = []
756 754 if text:
757 755 edittext.append(text)
758 756 edittext.append("")
759 757 edittext.append("HG: user: %s" % user)
760 758 if p2 != nullid:
761 759 edittext.append("HG: branch merge")
762 760 if branchname:
763 761 edittext.append("HG: branch %s" % util.tolocal(branchname))
764 762 edittext.extend(["HG: changed %s" % f for f in changed])
765 763 edittext.extend(["HG: removed %s" % f for f in removed])
766 764 if not changed and not remove:
767 765 edittext.append("HG: no files changed")
768 766 edittext.append("")
769 767 # run editor in the repository root
770 768 olddir = os.getcwd()
771 769 os.chdir(self.root)
772 770 text = self.ui.edit("\n".join(edittext), user)
773 771 os.chdir(olddir)
774 772
775 773 lines = [line.rstrip() for line in text.rstrip().splitlines()]
776 774 while lines and not lines[0]:
777 775 del lines[0]
778 776 if not lines:
779 777 return None
780 778 text = '\n'.join(lines)
781 779 if branchname:
782 780 extra["branch"] = branchname
783 781 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
784 782 user, date, extra)
785 783 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
786 784 parent2=xp2)
787 785 tr.close()
788 786
789 787 if self.branchcache and "branch" in extra:
790 788 self.branchcache[util.tolocal(extra["branch"])] = n
791 789
792 790 if use_dirstate or update_dirstate:
793 791 self.dirstate.setparents(n)
794 792 if use_dirstate:
795 793 for f in new:
796 794 self.dirstate.normal(f)
797 795 for f in removed:
798 796 self.dirstate.forget(f)
799 797
800 798 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
801 799 return n
802 800
803 801 def walk(self, node=None, files=[], match=util.always, badmatch=None):
804 802 '''
805 803 walk recursively through the directory tree or a given
806 804 changeset, finding all files matched by the match
807 805 function
808 806
809 807 results are yielded in a tuple (src, filename), where src
810 808 is one of:
811 809 'f' the file was found in the directory tree
812 810 'm' the file was only in the dirstate and not in the tree
813 811 'b' file was not found and matched badmatch
814 812 '''
815 813
816 814 if node:
817 815 fdict = dict.fromkeys(files)
818 816 # for dirstate.walk, files=['.'] means "walk the whole tree".
819 817 # follow that here, too
820 818 fdict.pop('.', None)
821 819 mdict = self.manifest.read(self.changelog.read(node)[0])
822 820 mfiles = mdict.keys()
823 821 mfiles.sort()
824 822 for fn in mfiles:
825 823 for ffn in fdict:
826 824 # match if the file is the exact name or a directory
827 825 if ffn == fn or fn.startswith("%s/" % ffn):
828 826 del fdict[ffn]
829 827 break
830 828 if match(fn):
831 829 yield 'm', fn
832 830 ffiles = fdict.keys()
833 831 ffiles.sort()
834 832 for fn in ffiles:
835 833 if badmatch and badmatch(fn):
836 834 if match(fn):
837 835 yield 'b', fn
838 836 else:
839 837 self.ui.warn(_('%s: No such file in rev %s\n')
840 838 % (self.pathto(fn), short(node)))
841 839 else:
842 840 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
843 841 yield src, fn
844 842
845 843 def status(self, node1=None, node2=None, files=[], match=util.always,
846 844 wlock=None, list_ignored=False, list_clean=False):
847 845 """return status of files between two nodes or node and working directory
848 846
849 847 If node1 is None, use the first dirstate parent instead.
850 848 If node2 is None, compare node1 with working directory.
851 849 """
852 850
853 851 def fcmp(fn, getnode):
854 852 t1 = self.wread(fn)
855 853 return self.file(fn).cmp(getnode(fn), t1)
856 854
857 855 def mfmatches(node):
858 856 change = self.changelog.read(node)
859 857 mf = self.manifest.read(change[0]).copy()
860 858 for fn in mf.keys():
861 859 if not match(fn):
862 860 del mf[fn]
863 861 return mf
864 862
865 863 modified, added, removed, deleted, unknown = [], [], [], [], []
866 864 ignored, clean = [], []
867 865
868 866 compareworking = False
869 867 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
870 868 compareworking = True
871 869
872 870 if not compareworking:
873 871 # read the manifest from node1 before the manifest from node2,
874 872 # so that we'll hit the manifest cache if we're going through
875 873 # all the revisions in parent->child order.
876 874 mf1 = mfmatches(node1)
877 875
878 876 # are we comparing the working directory?
879 877 if not node2:
880 878 (lookup, modified, added, removed, deleted, unknown,
881 879 ignored, clean) = self.dirstate.status(files, match,
882 880 list_ignored, list_clean)
883 881
884 882 # are we comparing working dir against its parent?
885 883 if compareworking:
886 884 if lookup:
887 885 fixup = []
888 886 # do a full compare of any files that might have changed
889 887 ctx = self.changectx()
890 888 for f in lookup:
891 889 if f not in ctx or ctx[f].cmp(self.wread(f)):
892 890 modified.append(f)
893 891 else:
894 892 fixup.append(f)
895 893 if list_clean:
896 894 clean.append(f)
897 895
898 896 # update dirstate for files that are actually clean
899 897 if fixup:
900 898 cleanup = False
901 899 if not wlock:
902 900 try:
903 901 wlock = self.wlock(wait=0)
904 902 cleanup = True
905 903 except lock.LockException:
906 904 pass
907 905 if wlock:
908 906 for f in fixup:
909 907 self.dirstate.normal(f)
910 908 if cleanup:
911 909 wlock.release()
912 910 else:
913 911 # we are comparing working dir against non-parent
914 912 # generate a pseudo-manifest for the working dir
915 913 # XXX: create it in dirstate.py ?
916 914 mf2 = mfmatches(self.dirstate.parents()[0])
917 915 is_exec = util.execfunc(self.root, mf2.execf)
918 916 is_link = util.linkfunc(self.root, mf2.linkf)
919 917 for f in lookup + modified + added:
920 918 mf2[f] = ""
921 919 mf2.set(f, is_exec(f), is_link(f))
922 920 for f in removed:
923 921 if f in mf2:
924 922 del mf2[f]
925 923
926 924 else:
927 925 # we are comparing two revisions
928 926 mf2 = mfmatches(node2)
929 927
930 928 if not compareworking:
931 929 # flush lists from dirstate before comparing manifests
932 930 modified, added, clean = [], [], []
933 931
934 932 # make sure to sort the files so we talk to the disk in a
935 933 # reasonable order
936 934 mf2keys = mf2.keys()
937 935 mf2keys.sort()
938 936 getnode = lambda fn: mf1.get(fn, nullid)
939 937 for fn in mf2keys:
940 938 if mf1.has_key(fn):
941 939 if (mf1.flags(fn) != mf2.flags(fn) or
942 940 (mf1[fn] != mf2[fn] and
943 941 (mf2[fn] != "" or fcmp(fn, getnode)))):
944 942 modified.append(fn)
945 943 elif list_clean:
946 944 clean.append(fn)
947 945 del mf1[fn]
948 946 else:
949 947 added.append(fn)
950 948
951 949 removed = mf1.keys()
952 950
953 951 # sort and return results:
954 952 for l in modified, added, removed, deleted, unknown, ignored, clean:
955 953 l.sort()
956 954 return (modified, added, removed, deleted, unknown, ignored, clean)
957 955
958 956 def add(self, list, wlock=None):
959 957 if not wlock:
960 958 wlock = self.wlock()
961 959 for f in list:
962 960 p = self.wjoin(f)
963 961 try:
964 962 st = os.lstat(p)
965 963 except:
966 964 self.ui.warn(_("%s does not exist!\n") % f)
967 965 continue
968 966 if st.st_size > 10000000:
969 967 self.ui.warn(_("%s: files over 10MB may cause memory and"
970 968 " performance problems\n"
971 969 "(use 'hg revert %s' to unadd the file)\n")
972 970 % (f, f))
973 971 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
974 972 self.ui.warn(_("%s not added: only files and symlinks "
975 973 "supported currently\n") % f)
976 974 elif self.dirstate[f] in 'an':
977 975 self.ui.warn(_("%s already tracked!\n") % f)
978 976 else:
979 977 self.dirstate.add(f)
980 978
981 979 def forget(self, list, wlock=None):
982 980 if not wlock:
983 981 wlock = self.wlock()
984 982 for f in list:
985 983 if self.dirstate[f] != 'a':
986 984 self.ui.warn(_("%s not added!\n") % f)
987 985 else:
988 986 self.dirstate.forget(f)
989 987
990 988 def remove(self, list, unlink=False, wlock=None):
991 989 if unlink:
992 990 for f in list:
993 991 try:
994 992 util.unlink(self.wjoin(f))
995 993 except OSError, inst:
996 994 if inst.errno != errno.ENOENT:
997 995 raise
998 996 if not wlock:
999 997 wlock = self.wlock()
1000 998 for f in list:
1001 999 if unlink and os.path.exists(self.wjoin(f)):
1002 1000 self.ui.warn(_("%s still exists!\n") % f)
1003 1001 elif self.dirstate[f] == 'a':
1004 1002 self.dirstate.forget(f)
1005 1003 elif f not in self.dirstate:
1006 1004 self.ui.warn(_("%s not tracked!\n") % f)
1007 1005 else:
1008 1006 self.dirstate.remove(f)
1009 1007
1010 1008 def undelete(self, list, wlock=None):
1011 1009 p = self.dirstate.parents()[0]
1012 1010 mn = self.changelog.read(p)[0]
1013 1011 m = self.manifest.read(mn)
1014 1012 if not wlock:
1015 1013 wlock = self.wlock()
1016 1014 for f in list:
1017 1015 if self.dirstate[f] != 'r':
1018 1016 self.ui.warn("%s not removed!\n" % f)
1019 1017 else:
1020 1018 t = self.file(f).read(m[f])
1021 1019 self.wwrite(f, t, m.flags(f))
1022 1020 self.dirstate.normal(f)
1023 1021
1024 1022 def copy(self, source, dest, wlock=None):
1025 1023 p = self.wjoin(dest)
1026 1024 if not (os.path.exists(p) or os.path.islink(p)):
1027 1025 self.ui.warn(_("%s does not exist!\n") % dest)
1028 1026 elif not (os.path.isfile(p) or os.path.islink(p)):
1029 1027 self.ui.warn(_("copy failed: %s is not a file or a "
1030 1028 "symbolic link\n") % dest)
1031 1029 else:
1032 1030 if not wlock:
1033 1031 wlock = self.wlock()
1034 1032 if dest not in self.dirstate:
1035 1033 self.dirstate.add(dest)
1036 1034 self.dirstate.copy(source, dest)
1037 1035
1038 1036 def heads(self, start=None):
1039 1037 heads = self.changelog.heads(start)
1040 1038 # sort the output in rev descending order
1041 1039 heads = [(-self.changelog.rev(h), h) for h in heads]
1042 1040 heads.sort()
1043 1041 return [n for (r, n) in heads]
1044 1042
1045 1043 def branchheads(self, branch, start=None):
1046 1044 branches = self.branchtags()
1047 1045 if branch not in branches:
1048 1046 return []
1049 1047 # The basic algorithm is this:
1050 1048 #
1051 1049 # Start from the branch tip since there are no later revisions that can
1052 1050 # possibly be in this branch, and the tip is a guaranteed head.
1053 1051 #
1054 1052 # Remember the tip's parents as the first ancestors, since these by
1055 1053 # definition are not heads.
1056 1054 #
1057 1055 # Step backwards from the branch tip through all the revisions. We are
1058 1056 # guaranteed by the rules of Mercurial that we will now be visiting the
1059 1057 # nodes in reverse topological order (children before parents).
1060 1058 #
1061 1059 # If a revision is one of the ancestors of a head then we can toss it
1062 1060 # out of the ancestors set (we've already found it and won't be
1063 1061 # visiting it again) and put its parents in the ancestors set.
1064 1062 #
1065 1063 # Otherwise, if a revision is in the branch it's another head, since it
1066 1064 # wasn't in the ancestor list of an existing head. So add it to the
1067 1065 # head list, and add its parents to the ancestor list.
1068 1066 #
1069 1067 # If it is not in the branch ignore it.
1070 1068 #
1071 1069 # Once we have a list of heads, use nodesbetween to filter out all the
1072 1070 # heads that cannot be reached from startrev. There may be a more
1073 1071 # efficient way to do this as part of the previous algorithm.
1074 1072
1075 1073 set = util.set
1076 1074 heads = [self.changelog.rev(branches[branch])]
1077 1075 # Don't care if ancestors contains nullrev or not.
1078 1076 ancestors = set(self.changelog.parentrevs(heads[0]))
1079 1077 for rev in xrange(heads[0] - 1, nullrev, -1):
1080 1078 if rev in ancestors:
1081 1079 ancestors.update(self.changelog.parentrevs(rev))
1082 1080 ancestors.remove(rev)
1083 1081 elif self.changectx(rev).branch() == branch:
1084 1082 heads.append(rev)
1085 1083 ancestors.update(self.changelog.parentrevs(rev))
1086 1084 heads = [self.changelog.node(rev) for rev in heads]
1087 1085 if start is not None:
1088 1086 heads = self.changelog.nodesbetween([start], heads)[2]
1089 1087 return heads
1090 1088
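As a standalone illustration of the walk the comments above describe, here is the same loop run over a toy five-revision history (parentrevs and branchof are hypothetical tables, not data from this file):

    nullrev = -1
    parentrevs = {0: (nullrev, nullrev), 1: (0, nullrev), 2: (0, nullrev),
                  3: (1, nullrev), 4: (2, nullrev)}
    branchof = {0: 'default', 1: 'stable', 2: 'default',
                3: 'stable', 4: 'default'}

    def toy_branchheads(branch, tiprev):
        heads = [tiprev]
        ancestors = set(parentrevs[tiprev])
        for rev in range(tiprev - 1, nullrev, -1):
            if rev in ancestors:           # ancestor of a head: not a head
                ancestors.update(parentrevs[rev])
                ancestors.remove(rev)
            elif branchof[rev] == branch:  # in branch, not an ancestor: head
                heads.append(rev)
                ancestors.update(parentrevs[rev])
        return heads

    print(toy_branchheads('default', 4))   # -> [4]; rev 2 is an ancestor of 4
    print(toy_branchheads('stable', 3))    # -> [3]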
1091 1089 def branches(self, nodes):
1092 1090 if not nodes:
1093 1091 nodes = [self.changelog.tip()]
1094 1092 b = []
1095 1093 for n in nodes:
1096 1094 t = n
1097 1095 while 1:
1098 1096 p = self.changelog.parents(n)
1099 1097 if p[1] != nullid or p[0] == nullid:
1100 1098 b.append((t, n, p[0], p[1]))
1101 1099 break
1102 1100 n = p[0]
1103 1101 return b
1104 1102
1105 1103 def between(self, pairs):
1106 1104 r = []
1107 1105
1108 1106 for top, bottom in pairs:
1109 1107 n, l, i = top, [], 0
1110 1108 f = 1
1111 1109
1112 1110 while n != bottom:
1113 1111 p = self.changelog.parents(n)[0]
1114 1112 if i == f:
1115 1113 l.append(n)
1116 1114 f = f * 2
1117 1115 n = p
1118 1116 i += 1
1119 1117
1120 1118 r.append(l)
1121 1119
1122 1120 return r
1123 1121
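The loop above records first-parent ancestors at exponentially growing distances (1, 2, 4, ...), which is what lets findincoming binary-search a branch range later. The same sampling as a standalone toy (parent is a hypothetical first-parent table):

    def sample(top, bottom, parent):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:         # record at distances 1, 2, 4, ...
                l.append(n)
                f *= 2
            n = parent[n]
            i += 1
        return l

    parent = {5: 4, 4: 3, 3: 2, 2: 1, 1: 0}  # toy linear history
    print(sample(5, 0, parent))              # -> [4, 3, 1]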
1124 1122 def findincoming(self, remote, base=None, heads=None, force=False):
1125 1123 """Return list of roots of the subsets of missing nodes from remote
1126 1124
1127 1125 If base dict is specified, assume that these nodes and their parents
1128 1126 exist on the remote side and that no child of a node of base exists
1129 1127 in both remote and self.
1130 1128 Furthermore base will be updated to include the nodes that exists
1131 1129 in self and remote but no children exists in self and remote.
1132 1130 If a list of heads is specified, return only nodes which are heads
1133 1131 or ancestors of these heads.
1134 1132
1135 1133 All the ancestors of base are in self and in remote.
1136 1134 All the descendants of the list returned are missing in self.
1137 1135 (and so we know that the rest of the nodes are missing in remote, see
1138 1136 outgoing)
1139 1137 """
1140 1138 m = self.changelog.nodemap
1141 1139 search = []
1142 1140 fetch = {}
1143 1141 seen = {}
1144 1142 seenbranch = {}
1145 1143 if base == None:
1146 1144 base = {}
1147 1145
1148 1146 if not heads:
1149 1147 heads = remote.heads()
1150 1148
1151 1149 if self.changelog.tip() == nullid:
1152 1150 base[nullid] = 1
1153 1151 if heads != [nullid]:
1154 1152 return [nullid]
1155 1153 return []
1156 1154
1157 1155 # assume we're closer to the tip than the root
1158 1156 # and start by examining the heads
1159 1157 self.ui.status(_("searching for changes\n"))
1160 1158
1161 1159 unknown = []
1162 1160 for h in heads:
1163 1161 if h not in m:
1164 1162 unknown.append(h)
1165 1163 else:
1166 1164 base[h] = 1
1167 1165
1168 1166 if not unknown:
1169 1167 return []
1170 1168
1171 1169 req = dict.fromkeys(unknown)
1172 1170 reqcnt = 0
1173 1171
1174 1172 # search through remote branches
1175 1173 # a 'branch' here is a linear segment of history, with four parts:
1176 1174 # head, root, first parent, second parent
1177 1175 # (a branch always has two parents (or none) by definition)
1178 1176 unknown = remote.branches(unknown)
1179 1177 while unknown:
1180 1178 r = []
1181 1179 while unknown:
1182 1180 n = unknown.pop(0)
1183 1181 if n[0] in seen:
1184 1182 continue
1185 1183
1186 1184 self.ui.debug(_("examining %s:%s\n")
1187 1185 % (short(n[0]), short(n[1])))
1188 1186 if n[0] == nullid: # found the end of the branch
1189 1187 pass
1190 1188 elif n in seenbranch:
1191 1189 self.ui.debug(_("branch already found\n"))
1192 1190 continue
1193 1191 elif n[1] and n[1] in m: # do we know the base?
1194 1192 self.ui.debug(_("found incomplete branch %s:%s\n")
1195 1193 % (short(n[0]), short(n[1])))
1196 1194 search.append(n) # schedule branch range for scanning
1197 1195 seenbranch[n] = 1
1198 1196 else:
1199 1197 if n[1] not in seen and n[1] not in fetch:
1200 1198 if n[2] in m and n[3] in m:
1201 1199 self.ui.debug(_("found new changeset %s\n") %
1202 1200 short(n[1]))
1203 1201 fetch[n[1]] = 1 # earliest unknown
1204 1202 for p in n[2:4]:
1205 1203 if p in m:
1206 1204 base[p] = 1 # latest known
1207 1205
1208 1206 for p in n[2:4]:
1209 1207 if p not in req and p not in m:
1210 1208 r.append(p)
1211 1209 req[p] = 1
1212 1210 seen[n[0]] = 1
1213 1211
1214 1212 if r:
1215 1213 reqcnt += 1
1216 1214 self.ui.debug(_("request %d: %s\n") %
1217 1215 (reqcnt, " ".join(map(short, r))))
1218 1216 for p in xrange(0, len(r), 10):
1219 1217 for b in remote.branches(r[p:p+10]):
1220 1218 self.ui.debug(_("received %s:%s\n") %
1221 1219 (short(b[0]), short(b[1])))
1222 1220 unknown.append(b)
1223 1221
1224 1222 # do binary search on the branches we found
1225 1223 while search:
1226 1224 n = search.pop(0)
1227 1225 reqcnt += 1
1228 1226 l = remote.between([(n[0], n[1])])[0]
1229 1227 l.append(n[1])
1230 1228 p = n[0]
1231 1229 f = 1
1232 1230 for i in l:
1233 1231 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1234 1232 if i in m:
1235 1233 if f <= 2:
1236 1234 self.ui.debug(_("found new branch changeset %s\n") %
1237 1235 short(p))
1238 1236 fetch[p] = 1
1239 1237 base[i] = 1
1240 1238 else:
1241 1239 self.ui.debug(_("narrowed branch search to %s:%s\n")
1242 1240 % (short(p), short(i)))
1243 1241 search.append((p, i))
1244 1242 break
1245 1243 p, f = i, f * 2
1246 1244
1247 1245 # sanity check our fetch list
1248 1246 for f in fetch.keys():
1249 1247 if f in m:
1250 1248 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1251 1249
1252 1250 if base.keys() == [nullid]:
1253 1251 if force:
1254 1252 self.ui.warn(_("warning: repository is unrelated\n"))
1255 1253 else:
1256 1254 raise util.Abort(_("repository is unrelated"))
1257 1255
1258 1256 self.ui.debug(_("found new changesets starting at ") +
1259 1257 " ".join([short(f) for f in fetch]) + "\n")
1260 1258
1261 1259 self.ui.debug(_("%d total queries\n") % reqcnt)
1262 1260
1263 1261 return fetch.keys()
1264 1262
1265 1263 def findoutgoing(self, remote, base=None, heads=None, force=False):
1266 1264 """Return list of nodes that are roots of subsets not in remote
1267 1265
1268 1266 If base dict is specified, assume that these nodes and their parents
1269 1267 exist on the remote side.
1270 1268 If a list of heads is specified, return only nodes which are heads
1271 1269 or ancestors of these heads, and return a second element which
1272 1270 contains all remote heads which get new children.
1273 1271 """
1274 1272 if base == None:
1275 1273 base = {}
1276 1274 self.findincoming(remote, base, heads, force=force)
1277 1275
1278 1276 self.ui.debug(_("common changesets up to ")
1279 1277 + " ".join(map(short, base.keys())) + "\n")
1280 1278
1281 1279 remain = dict.fromkeys(self.changelog.nodemap)
1282 1280
1283 1281 # prune everything remote has from the tree
1284 1282 del remain[nullid]
1285 1283 remove = base.keys()
1286 1284 while remove:
1287 1285 n = remove.pop(0)
1288 1286 if n in remain:
1289 1287 del remain[n]
1290 1288 for p in self.changelog.parents(n):
1291 1289 remove.append(p)
1292 1290
1293 1291 # find every node whose parents have been pruned
1294 1292 subset = []
1295 1293 # find every remote head that will get new children
1296 1294 updated_heads = {}
1297 1295 for n in remain:
1298 1296 p1, p2 = self.changelog.parents(n)
1299 1297 if p1 not in remain and p2 not in remain:
1300 1298 subset.append(n)
1301 1299 if heads:
1302 1300 if p1 in heads:
1303 1301 updated_heads[p1] = True
1304 1302 if p2 in heads:
1305 1303 updated_heads[p2] = True
1306 1304
1307 1305 # this is the set of all roots we have to push
1308 1306 if heads:
1309 1307 return subset, updated_heads.keys()
1310 1308 else:
1311 1309 return subset
1312 1310
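A toy rendering of the pruning above: drop everything reachable from base, then the outgoing roots are the survivors none of whose parents survived (parents is a hypothetical adjacency table; the real code keeps dicts rather than sets):

    parents = {'a': (), 'b': ('a',), 'c': ('b',), 'd': ('c',)}
    remain = set(parents)
    remove = ['b']                 # the remote already has a and b
    while remove:
        n = remove.pop()
        if n in remain:
            remain.discard(n)
            remove.extend(parents[n])
    roots = [n for n in remain
             if all(p not in remain for p in parents[n])]
    print(roots)                   # -> ['c']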
1313 1311 def pull(self, remote, heads=None, force=False, lock=None):
1314 1312 mylock = False
1315 1313 if not lock:
1316 1314 lock = self.lock()
1317 1315 mylock = True
1318 1316
1319 1317 try:
1320 1318 fetch = self.findincoming(remote, force=force)
1321 1319 if fetch == [nullid]:
1322 1320 self.ui.status(_("requesting all changes\n"))
1323 1321
1324 1322 if not fetch:
1325 1323 self.ui.status(_("no changes found\n"))
1326 1324 return 0
1327 1325
1328 1326 if heads is None:
1329 1327 cg = remote.changegroup(fetch, 'pull')
1330 1328 else:
1331 1329 if 'changegroupsubset' not in remote.capabilities:
1332 1330 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1333 1331 cg = remote.changegroupsubset(fetch, heads, 'pull')
1334 1332 return self.addchangegroup(cg, 'pull', remote.url())
1335 1333 finally:
1336 1334 if mylock:
1337 1335 lock.release()
1338 1336
1339 1337 def push(self, remote, force=False, revs=None):
1340 1338 # there are two ways to push to remote repo:
1341 1339 #
1342 1340 # addchangegroup assumes local user can lock remote
1343 1341 # repo (local filesystem, old ssh servers).
1344 1342 #
1345 1343 # unbundle assumes local user cannot lock remote repo (new ssh
1346 1344 # servers, http servers).
1347 1345
1348 1346 if remote.capable('unbundle'):
1349 1347 return self.push_unbundle(remote, force, revs)
1350 1348 return self.push_addchangegroup(remote, force, revs)
1351 1349
1352 1350 def prepush(self, remote, force, revs):
1353 1351 base = {}
1354 1352 remote_heads = remote.heads()
1355 1353 inc = self.findincoming(remote, base, remote_heads, force=force)
1356 1354
1357 1355 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1358 1356 if revs is not None:
1359 1357 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1360 1358 else:
1361 1359 bases, heads = update, self.changelog.heads()
1362 1360
1363 1361 if not bases:
1364 1362 self.ui.status(_("no changes found\n"))
1365 1363 return None, 1
1366 1364 elif not force:
1367 1365 # check if we're creating new remote heads
1368 1366 # to be a remote head after push, node must be either
1369 1367 # - unknown locally
1370 1368 # - a local outgoing head descended from update
1371 1369 # - a remote head that's known locally and not
1372 1370 # ancestral to an outgoing head
1373 1371
1374 1372 warn = 0
1375 1373
1376 1374 if remote_heads == [nullid]:
1377 1375 warn = 0
1378 1376 elif not revs and len(heads) > len(remote_heads):
1379 1377 warn = 1
1380 1378 else:
1381 1379 newheads = list(heads)
1382 1380 for r in remote_heads:
1383 1381 if r in self.changelog.nodemap:
1384 1382 desc = self.changelog.heads(r, heads)
1385 1383 l = [h for h in heads if h in desc]
1386 1384 if not l:
1387 1385 newheads.append(r)
1388 1386 else:
1389 1387 newheads.append(r)
1390 1388 if len(newheads) > len(remote_heads):
1391 1389 warn = 1
1392 1390
1393 1391 if warn:
1394 1392 self.ui.warn(_("abort: push creates new remote branches!\n"))
1395 1393 self.ui.status(_("(did you forget to merge?"
1396 1394 " use push -f to force)\n"))
1397 1395 return None, 1
1398 1396 elif inc:
1399 1397 self.ui.warn(_("note: unsynced remote changes!\n"))
1400 1398
1401 1399
1402 1400 if revs is None:
1403 1401 cg = self.changegroup(update, 'push')
1404 1402 else:
1405 1403 cg = self.changegroupsubset(update, revs, 'push')
1406 1404 return cg, remote_heads
1407 1405
1408 1406 def push_addchangegroup(self, remote, force, revs):
1409 1407 lock = remote.lock()
1410 1408
1411 1409 ret = self.prepush(remote, force, revs)
1412 1410 if ret[0] is not None:
1413 1411 cg, remote_heads = ret
1414 1412 return remote.addchangegroup(cg, 'push', self.url())
1415 1413 return ret[1]
1416 1414
1417 1415 def push_unbundle(self, remote, force, revs):
1418 1416 # local repo finds heads on server, finds out what revs it
1419 1417 # must push. once revs transferred, if server finds it has
1420 1418 # different heads (someone else won commit/push race), server
1421 1419 # aborts.
1422 1420
1423 1421 ret = self.prepush(remote, force, revs)
1424 1422 if ret[0] is not None:
1425 1423 cg, remote_heads = ret
1426 1424 if force: remote_heads = ['force']
1427 1425 return remote.unbundle(cg, remote_heads, 'push')
1428 1426 return ret[1]
1429 1427
1430 1428 def changegroupinfo(self, nodes):
1431 1429 self.ui.note(_("%d changesets found\n") % len(nodes))
1432 1430 if self.ui.debugflag:
1433 1431 self.ui.debug(_("List of changesets:\n"))
1434 1432 for node in nodes:
1435 1433 self.ui.debug("%s\n" % hex(node))
1436 1434
1437 1435 def changegroupsubset(self, bases, heads, source):
1438 1436 """This function generates a changegroup consisting of all the nodes
1439 1437 that are descendants of any of the bases, and ancestors of any of
1440 1438 the heads.
1441 1439
1442 1440 It is fairly complex as determining which filenodes and which
1443 1441 manifest nodes need to be included for the changeset to be complete
1444 1442 is non-trivial.
1445 1443
1446 1444 Another wrinkle is doing the reverse, figuring out which changeset in
1447 1445 the changegroup a particular filenode or manifestnode belongs to."""
1448 1446
1449 1447 self.hook('preoutgoing', throw=True, source=source)
1450 1448
1451 1449 # Set up some initial variables
1452 1450 # Make it easy to refer to self.changelog
1453 1451 cl = self.changelog
1454 1452 # msng is short for missing - compute the list of changesets in this
1455 1453 # changegroup.
1456 1454 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1457 1455 self.changegroupinfo(msng_cl_lst)
1458 1456 # Some bases may turn out to be superfluous, and some heads may be
1459 1457 # too. nodesbetween will return the minimal set of bases and heads
1460 1458 # necessary to re-create the changegroup.
1461 1459
1462 1460 # Known heads are the list of heads that it is assumed the recipient
1463 1461 # of this changegroup will know about.
1464 1462 knownheads = {}
1465 1463 # We assume that all parents of bases are known heads.
1466 1464 for n in bases:
1467 1465 for p in cl.parents(n):
1468 1466 if p != nullid:
1469 1467 knownheads[p] = 1
1470 1468 knownheads = knownheads.keys()
1471 1469 if knownheads:
1472 1470 # Now that we know what heads are known, we can compute which
1473 1471 # changesets are known. The recipient must know about all
1474 1472 # changesets required to reach the known heads from the null
1475 1473 # changeset.
1476 1474 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1477 1475 junk = None
1478 1476 # Transform the list into an ersatz set.
1479 1477 has_cl_set = dict.fromkeys(has_cl_set)
1480 1478 else:
1481 1479 # If there were no known heads, the recipient cannot be assumed to
1482 1480 # know about any changesets.
1483 1481 has_cl_set = {}
1484 1482
1485 1483 # Make it easy to refer to self.manifest
1486 1484 mnfst = self.manifest
1487 1485 # We don't know which manifests are missing yet
1488 1486 msng_mnfst_set = {}
1489 1487 # Nor do we know which filenodes are missing.
1490 1488 msng_filenode_set = {}
1491 1489
1492 1490 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1493 1491 junk = None
1494 1492
1495 1493 # A changeset always belongs to itself, so the changenode lookup
1496 1494 # function for a changenode is identity.
1497 1495 def identity(x):
1498 1496 return x
1499 1497
1500 1498 # A function generating function. Sets up an environment for the
1501 1499 # inner function.
1502 1500 def cmp_by_rev_func(revlog):
1503 1501 # Compare two nodes by their revision number in the environment's
1504 1502 # revision history. Since the revision number both represents the
1505 1503 # most efficient order to read the nodes in, and represents a
1506 1504 # topological sorting of the nodes, this function is often useful.
1507 1505 def cmp_by_rev(a, b):
1508 1506 return cmp(revlog.rev(a), revlog.rev(b))
1509 1507 return cmp_by_rev
1510 1508
1511 1509 # If we determine that a particular file or manifest node must be a
1512 1510 # node that the recipient of the changegroup will already have, we can
1513 1511 # also assume the recipient will have all the parents. This function
1514 1512 # prunes them from the set of missing nodes.
1515 1513 def prune_parents(revlog, hasset, msngset):
1516 1514 haslst = hasset.keys()
1517 1515 haslst.sort(cmp_by_rev_func(revlog))
1518 1516 for node in haslst:
1519 1517 parentlst = [p for p in revlog.parents(node) if p != nullid]
1520 1518 while parentlst:
1521 1519 n = parentlst.pop()
1522 1520 if n not in hasset:
1523 1521 hasset[n] = 1
1524 1522 p = [p for p in revlog.parents(n) if p != nullid]
1525 1523 parentlst.extend(p)
1526 1524 for n in hasset:
1527 1525 msngset.pop(n, None)
1528 1526
1529 1527 # This is a function generating function used to set up an environment
1530 1528 # for the inner function to execute in.
1531 1529 def manifest_and_file_collector(changedfileset):
1532 1530 # This is an information gathering function that gathers
1533 1531 # information from each changeset node that goes out as part of
1534 1532 # the changegroup. The information gathered is a list of which
1535 1533 # manifest nodes are potentially required (the recipient may
1536 1534 # already have them) and total list of all files which were
1537 1535 # changed in any changeset in the changegroup.
1538 1536 #
1539 1537 # We also remember the first changenode we saw any manifest
1540 1538 # referenced by so we can later determine which changenode 'owns'
1541 1539 # the manifest.
1542 1540 def collect_manifests_and_files(clnode):
1543 1541 c = cl.read(clnode)
1544 1542 for f in c[3]:
1545 1543 # This is to make sure we only have one instance of each
1546 1544 # filename string for each filename.
1547 1545 changedfileset.setdefault(f, f)
1548 1546 msng_mnfst_set.setdefault(c[0], clnode)
1549 1547 return collect_manifests_and_files
1550 1548
1551 1549 # Figure out which manifest nodes (of the ones we think might be part
1552 1550 # of the changegroup) the recipient must know about and remove them
1553 1551 # from the changegroup.
1554 1552 def prune_manifests():
1555 1553 has_mnfst_set = {}
1556 1554 for n in msng_mnfst_set:
1557 1555 # If a 'missing' manifest thinks it belongs to a changenode
1558 1556 # the recipient is assumed to have, obviously the recipient
1559 1557 # must have that manifest.
1560 1558 linknode = cl.node(mnfst.linkrev(n))
1561 1559 if linknode in has_cl_set:
1562 1560 has_mnfst_set[n] = 1
1563 1561 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1564 1562
1565 1563 # Use the information collected in collect_manifests_and_files to say
1566 1564 # which changenode any manifestnode belongs to.
1567 1565 def lookup_manifest_link(mnfstnode):
1568 1566 return msng_mnfst_set[mnfstnode]
1569 1567
1570 1568 # A function generating function that sets up the initial environment
1571 1569 # the inner function.
1572 1570 def filenode_collector(changedfiles):
1573 1571 next_rev = [0]
1574 1572 # This gathers information from each manifestnode included in the
1575 1573 # changegroup about which filenodes the manifest node references
1576 1574 # so we can include those in the changegroup too.
1577 1575 #
1578 1576 # It also remembers which changenode each filenode belongs to. It
1579 1577 # does this by assuming the a filenode belongs to the changenode
1580 1578 # the first manifest that references it belongs to.
1581 1579 def collect_msng_filenodes(mnfstnode):
1582 1580 r = mnfst.rev(mnfstnode)
1583 1581 if r == next_rev[0]:
1584 1582 # If the last rev we looked at was the one just previous,
1585 1583 # we only need to see a diff.
1586 1584 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1587 1585 # For each line in the delta
1588 1586 for dline in delta.splitlines():
1589 1587 # get the filename and filenode for that line
1590 1588 f, fnode = dline.split('\0')
1591 1589 fnode = bin(fnode[:40])
1592 1590 f = changedfiles.get(f, None)
1593 1591 # And if the file is in the list of files we care
1594 1592 # about.
1595 1593 if f is not None:
1596 1594 # Get the changenode this manifest belongs to
1597 1595 clnode = msng_mnfst_set[mnfstnode]
1598 1596 # Create the set of filenodes for the file if
1599 1597 # there isn't one already.
1600 1598 ndset = msng_filenode_set.setdefault(f, {})
1601 1599 # And set the filenode's changelog node to the
1602 1600 # manifest's if it hasn't been set already.
1603 1601 ndset.setdefault(fnode, clnode)
1604 1602 else:
1605 1603 # Otherwise we need a full manifest.
1606 1604 m = mnfst.read(mnfstnode)
1607 1605 # For every file in we care about.
1608 1606 for f in changedfiles:
1609 1607 fnode = m.get(f, None)
1610 1608 # If it's in the manifest
1611 1609 if fnode is not None:
1612 1610 # See comments above.
1613 1611 clnode = msng_mnfst_set[mnfstnode]
1614 1612 ndset = msng_filenode_set.setdefault(f, {})
1615 1613 ndset.setdefault(fnode, clnode)
1616 1614 # Remember the revision we hope to see next.
1617 1615 next_rev[0] = r + 1
1618 1616 return collect_msng_filenodes
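
        # Illustrative note (informal, inferred from the parsing above): each
        # line of a manifest text or manifest delta has the form
        #
        #   <filename>\0<40-hex-digit filenode>
        #
        # so for a hypothetical entry:
        #
        #   dline = 'src/foo.c\x001234567890abcdef1234567890abcdef12345678'
        #   f, fnode = dline.split('\0')   # 'src/foo.c', hex node string
        #   fnode = bin(fnode[:40])        # binary filenode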

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link
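
        # Informal note on the closure pattern above: binding msngset once
        # per file saves a dict lookup per filenode; a hypothetical caller
        # would do, roughly:
        #
        #   lookup = lookup_filenode_link_func('foo.c')
        #   clnode = lookup(fnode)  # changenode to encode in the group chunk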

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if msng_filenode_lst:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
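
        # Shape of the stream produced above (an informal sketch, not a
        # normative description of the changegroup wire format): each revlog
        # group is a run of length-prefixed chunks ended by an empty chunk,
        # i.e. roughly
        #
        #   changelog group
        #   manifest group
        #   for each changed file: filename chunk, then its filelog group
        #   closing empty chunk (the closechunk() above)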

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""
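        # Hypothetical usage sketch (names for illustration only): a pull
        # from this repository ends up here via something like
        #
        #   cg = remote_repo.changegroup(common_bases, 'pull')
        #   local_repo.addchangegroup(cg, 'pull', url)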

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

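        # Informal example: if revset is {5: None, 7: None}, gennodelst
        # yields exactly the nodes of the given revlog whose linkrev is 5
        # or 7, i.e. the revisions introduced by the outgoing changesets,
        # in revision order.
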
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for an empty manifest group here: if two
        # changesets (say, both merging 1 and 2) produce the same manifest,
        # no new manifest node is created for the second one, so the
        # manifest group may legitimately be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
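        # What arrives next on the wire (informal sketch inferred from the
        # parsing below, not a protocol spec): one line
        # "<total_files> <total_bytes>", then for each file a line
        # "<name>\0<size>" followed by exactly <size> bytes of raw store
        # data for that file.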
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
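        # Hypothetical usage sketch (illustration only): higher-level clone
        # machinery would call something like
        #
        #   dest_repo.clone(source_repo, stream=True)
        #
        # to request a streaming clone when the user asks for an
        # uncompressed transfer.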

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
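
# Informal sketch of the pattern above (hypothetical call site): the
# transaction code is handed the returned callable and runs it once the
# transaction succeeds, e.g.
#
#   after = aftertrans([("journal", "undo")])
#   ... transaction commits ...
#   after()   # renames journal -> undo
#
# Because the closure captures only plain tuples (never the repository
# object), it creates no reference cycle and __del__ can still run.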

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True