##// END OF EJS Templates
pull -r: pass the revisions as the heads argument of findincoming...
Author: Alexis S. L. Carvalho
r5221:8ede77c2 default
parent child Browse files
Show More
#!/bin/sh
# Regression test for "hg pull -r REV": the requested revisions must be
# passed as the heads argument of findincoming, so that re-pulling an
# already-present revision is a no-op instead of aborting.

hg init repo
cd repo
echo foo > foo
hg ci -qAm 'add foo' -d '0 0'
echo >> foo
hg ci -m 'change foo' -d '0 0'
hg up -qC 0
echo bar > bar
hg ci -qAm 'add bar' -d '0 0'
hg log
cd ..
hg init copy
cd copy

echo '% pull -r 0'
hg pull -qr 0 ../repo
hg log

echo '% pull -r 1'
hg pull -qr 1 ../repo
hg log

# this used to abort: received changelog group is empty
echo '% pull -r 1 again'
hg pull -qr 1 ../repo
@@ -0,0 +1,37 b''
1 changeset: 2:effea6de0384
2 tag: tip
3 parent: 0:bbd179dfa0a7
4 user: test
5 date: Thu Jan 01 00:00:00 1970 +0000
6 summary: add bar
7
8 changeset: 1:ed1b79f46b9a
9 user: test
10 date: Thu Jan 01 00:00:00 1970 +0000
11 summary: change foo
12
13 changeset: 0:bbd179dfa0a7
14 user: test
15 date: Thu Jan 01 00:00:00 1970 +0000
16 summary: add foo
17
18 % pull -r 0
19 changeset: 0:bbd179dfa0a7
20 tag: tip
21 user: test
22 date: Thu Jan 01 00:00:00 1970 +0000
23 summary: add foo
24
25 % pull -r 1
26 changeset: 1:ed1b79f46b9a
27 tag: tip
28 user: test
29 date: Thu Jan 01 00:00:00 1970 +0000
30 summary: change foo
31
32 changeset: 0:bbd179dfa0a7
33 user: test
34 date: Thu Jan 01 00:00:00 1970 +0000
35 summary: add foo
36
37 % pull -r 1 again
@@ -1,1947 +1,1947 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util, extensions, hook
14 14
class localrepository(repo.repository):
    # wire-protocol capabilities this repository advertises to peers
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk format requirements this class knows how to read
    supported = ('revlogv1', 'store')

    def __del__(self):
        # Drop the transaction handle so a pending transaction object does
        # not keep this repository (and its openers) alive via a cycle.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at *path*.

        parentui: ui object whose configuration is inherited.
        Raises repo.RepoError if the repository is missing, already
        exists (when creating), or requires an unsupported format.
        """
        repo.repository.__init__(self)
        self.path = path
        self.root = os.path.realpath(path)
        # repository metadata lives in the ".hg" directory under the root
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing "requires" file means an old-style repo
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, revlogs live under
        # .hg/store with encoded filenames; otherwise directly in .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            # per-repository configuration overrides the inherited ui
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        # lazily-populated caches (see tags()/branchtags()/__getattr__)
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None
88 88
    def __getattr__(self, name):
        """Lazily create the expensive changelog/manifest/dirstate objects.

        Only called on first access; the created object is stored on the
        instance, so subsequent lookups bypass __getattr__ entirely.
        """
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # the store opener must use the changelog's revlog version
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch the changelog first so defversion is set before the
            # manifest revlog is opened
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
103 103
104 104 def url(self):
105 105 return 'file:' + self.root
106 106
    def hook(self, name, throw=False, **args):
        """Run the named hook; with throw=True a failing hook raises."""
        return hook.hook(self.ui, self, name, throw, **args)
109 109
    # characters that may not appear in a tag name (see _tag)
    tag_disallowed = ':\r\n'
111 111
    def _tag(self, name, node, message, local, user, date, parent=None):
        """Record tag *name* for *node*.

        local=True stores the tag in .hg/localtags (uncommitted, local
        charset); otherwise it is appended to .hgtags (UTF-8) and a new
        changeset is committed.  *parent* selects the commit parent for
        the non-dirstate (rawcommit-style) case.
        Returns the new tag changeset node, or None for local tags.
        """
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            # append one "node name" line, separating from any previous
            # content that did not end with a newline
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()
            self.hook('tag', node=hex(node), tag=name, local=local)

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            # committing against an explicit parent: start from that
            # revision's .hgtags content, not the working copy's
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
168 168
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to commit a tag on top of uncommitted .hgtags edits
        # (status()[:5] = modified, added, removed, deleted, unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)
194 194
    def tags(self):
        '''return a mapping of tag to node'''
        # cached after the first call; invalidate() clears it
        if self.tagscache:
            return self.tagscache

        globaltags = {}

        def readtags(lines, fn):
            # parse one tags file, folding its entries into globaltags;
            # fn is only used for warning messages
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # within one file, later entries supersede earlier ones;
                # h accumulates the superseded nodes for ranking below
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            # reuse the previous filectx to share the filelog
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            pass

        self.tagscache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # a tag mapped to nullid means "deleted tag"
            if n != nullid:
                self.tagscache[k] = n
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
273 273
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) for each head's .hgtags file.

        Heads are yielded in increasing-rev order (tip last); when two
        heads share the same .hgtags filenode only the last occurrence
        is kept, so each tags-file version is read once.
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # duplicate filenode: blank out the earlier entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
291 291
292 292 def tagslist(self):
293 293 '''return a list of tags ordered by revision'''
294 294 l = []
295 295 for t, n in self.tags().items():
296 296 try:
297 297 r = self.changelog.rev(n)
298 298 except:
299 299 r = -2 # sort to the beginning of the list if unknown
300 300 l.append((r, t, n))
301 301 l.sort()
302 302 return [(t, n) for r, t, n in l]
303 303
304 304 def nodetags(self, node):
305 305 '''return the tags associated with a node'''
306 306 if not self.nodetagscache:
307 307 self.nodetagscache = {}
308 308 for t, n in self.tags().items():
309 309 self.nodetagscache.setdefault(n, []).append(t)
310 310 return self.nodetagscache.get(node, [])
311 311
    def _branchtags(self):
        """Return the branch -> tipmost-node map, updating the disk cache.

        Reads .hg/branch.cache, scans any revisions added since it was
        written, and writes the refreshed cache back.
        """
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is stale: fold in revisions lrev+1 .. tiprev
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
321 321
    def branchtags(self):
        """Return the in-memory branch map (local charset keys)."""
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
334 334
    def _readbranchcache(self):
        """Parse .hg/branch.cache.

        Returns (partial, last, lrev): the branch->node map, and the
        tip node/rev the cache was valid for.  Any problem (missing
        file, corrupt content, tip mismatch after a strip) degrades to
        an empty cache rather than raising.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "<node hex> <branch name>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
362 362
    def _writebranchcache(self, branches, tip, tiprev):
        """Atomically write .hg/branch.cache; write failures are ignored
        since the cache can always be rebuilt."""
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
372 372
373 373 def _updatebranchcache(self, partial, start, end):
374 374 for r in xrange(start, end):
375 375 c = self.changectx(r)
376 376 b = c.branch()
377 377 partial[b] = c.node()
378 378
    def lookup(self, key):
        """Resolve a revision identifier to a binary changeset node.

        Tried in order: '.' (first working-dir parent), 'null', exact
        rev/hash match, tag name, branch name, unambiguous hash prefix.
        Raises repo.RepoError when nothing matches.
        """
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
400 400
401 401 def dev(self):
402 402 return os.lstat(self.path).st_dev
403 403
    def local(self):
        # this repository is directly accessible on local disk
        return True
406 406
407 407 def join(self, f):
408 408 return os.path.join(self.path, f)
409 409
410 410 def sjoin(self, f):
411 411 f = self.encodefn(f)
412 412 return os.path.join(self.spath, f)
413 413
414 414 def wjoin(self, f):
415 415 return os.path.join(self.root, f)
416 416
417 417 def file(self, f):
418 418 if f[0] == '/':
419 419 f = f[1:]
420 420 return filelog.filelog(self.sopener, f)
421 421
    def changectx(self, changeid=None):
        """Return a changectx for *changeid* (rev, node, or tag)."""
        return context.changectx(self, changeid)
424 424
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
427 427
428 428 def parents(self, changeid=None):
429 429 '''
430 430 get list of changectxs for parents of changeid or working directory
431 431 '''
432 432 if changeid is None:
433 433 pl = self.dirstate.parents()
434 434 else:
435 435 n = self.changelog.lookup(changeid)
436 436 pl = self.changelog.parents(n)
437 437 if pl[1] == nullid:
438 438 return [self.changectx(pl[0])]
439 439 return [self.changectx(pl[0]), self.changectx(pl[1])]
440 440
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
445 445
    def getcwd(self):
        """Return the current directory relative to the repo root."""
        return self.dirstate.getcwd()
448 448
    def pathto(self, f, cwd=None):
        """Return repo-relative path *f* expressed relative to *cwd*."""
        return self.dirstate.pathto(f, cwd)
451 451
    def wfile(self, f, mode='r'):
        """Open working-directory file *f* with the given mode."""
        return self.wopener(f, mode)
454 454
455 455 def _link(self, f):
456 456 return os.path.islink(self.wjoin(f))
457 457
    def _filter(self, filter, filename, data):
        """Run *data* through the first matching [encode]/[decode] filter.

        *filter* is the config section name; the compiled (matcher, cmd)
        pairs are cached per section in self.filterpats.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching pattern applies
                break

        return data
473 473
    def wread(self, filename):
        """Read *filename* from the working dir, applying encode filters.

        Symlinks are read as their target path rather than file content.
        """
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)
480 480
    def wwrite(self, filename, data, flags):
        """Write *data* to working-dir *filename*, applying decode filters.

        flags: "l" writes a symlink with target *data*; "x" marks the
        file executable.
        """
        data = self._filter("decode", filename, data)
        if "l" in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                # replace an existing symlink with a regular file
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
493 493
    def wwritedata(self, filename, data):
        """Return *data* passed through the decode filters (no file write)."""
        return self._filter("decode", filename, data)
496 496
    def transaction(self):
        """Return a (possibly nested) transaction for this repository.

        Also snapshots the dirstate to journal.dirstate so rollback can
        restore it.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # a transaction is already active: join it
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # on close, journal files are renamed to undo files for rollback
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
516 516
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True if a journal was found and rolled back.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # drop cached changelog/manifest, they may now be stale
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
527 527
    def rollback(self, wlock=None, lock=None):
        """Undo the last committed transaction using the undo files.

        wlock/lock: already-held locks may be passed in to avoid
        re-acquiring them.
        """
        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            # restore the dirstate snapshot taken by transaction()
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.invalidate()
            self.dirstate.invalidate()
        else:
            self.ui.warn(_("no rollback information available\n"))
541 541
    def invalidate(self):
        """Drop cached changelog/manifest objects and tag caches.

        The lazily-created attributes are deleted so __getattr__
        rebuilds them from disk on next access.
        """
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self.nodetagscache = None
548 548
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file *lockname*.

        With wait true, retries (ui.timeout, default 600s) when the lock
        is held; otherwise lock.LockHeld propagates.  acquirefn is run
        after the lock is obtained; releasefn when it is released.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
564 564
    def lock(self, wait=1):
        """Acquire the store lock; caches are invalidated on acquisition."""
        return self.do_lock(self.sjoin("lock"), wait,
                            acquirefn=self.invalidate,
                            desc=_('repository %s') % self.origroot)
569 569
    def wlock(self, wait=1):
        """Acquire the working-directory lock; the dirstate is written
        on release and invalidated on acquisition."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.dirstate.invalidate,
                            desc=_('working directory of %s') % self.origroot)
574 574
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node for *fn*; appends fn to
        *changelist* only when a new revision is actually created.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
634 634
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
        """Commit *files* with explicit parents, bypassing dirstate state
        checks; defaults to the dirstate parents when p1 is None."""
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock, extra=extra)
640 640
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset from the working directory (or, with
        explicit p1/p2, rawcommit-style from the given file list).

        Returns the new changeset node, or None when nothing changed or
        the commit message came back empty.  Fires the precommit,
        pretxncommit and commit hooks.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        # partition the candidate files into commits and removals
        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it already sits on p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to verify the branch name is valid UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            # bail out early if there is literally nothing to commit
            if (not commit and not remove and not force and p2 == nullid
                and branchname == oldname):
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                new_exec = is_exec(f)
                new_link = is_link(f)
                if not changed or changed[-1] != f:
                    # mention the file in the changelog if some flag changed,
                    # even if there was no content change.
                    old_exec = m1.execf(f)
                    old_link = m1.linkf(f)
                    if old_exec != new_exec or old_link != new_link:
                        changed.append(f)
                m1.set(f, new_exec, new_link)
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a missing file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
            elif f in m2:
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the template shown in the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        # keep the in-memory branch cache current
        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
800 800
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a  tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # remove matched names so leftovers can be
                        # reported as bad below
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            # no node: defer to the dirstate's working-dir walk
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
842 842
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        ignored/clean are only populated when the matching flag is set.
        """

        def fcmp(fn, getnode):
            # full-content compare of working file fn against its filelog
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of *node* restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        mywlock = False

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            if list_clean:
                                clean.append(f)
                            # opportunistically update the dirstate so the
                            # file is not re-compared next time; only if we
                            # can take the wlock without blocking
                            if not wlock and not mywlock:
                                mywlock = True
                                try:
                                    wlock = self.wlock(wait=0)
                                except lock.LockException:
                                    pass
                            if wlock:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

            if mywlock and wlock:
                wlock.release()
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    # entries left in mf1 afterwards are the removals
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
952 952
953 953 def add(self, list, wlock=None):
954 954 if not wlock:
955 955 wlock = self.wlock()
956 956 for f in list:
957 957 p = self.wjoin(f)
958 958 try:
959 959 st = os.lstat(p)
960 960 except:
961 961 self.ui.warn(_("%s does not exist!\n") % f)
962 962 continue
963 963 if st.st_size > 10000000:
964 964 self.ui.warn(_("%s: files over 10MB may cause memory and"
965 965 " performance problems\n"
966 966 "(use 'hg revert %s' to unadd the file)\n")
967 967 % (f, f))
968 968 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
969 969 self.ui.warn(_("%s not added: only files and symlinks "
970 970 "supported currently\n") % f)
971 971 elif self.dirstate.state(f) in 'an':
972 972 self.ui.warn(_("%s already tracked!\n") % f)
973 973 else:
974 974 self.dirstate.update([f], "a")
975 975
976 976 def forget(self, list, wlock=None):
977 977 if not wlock:
978 978 wlock = self.wlock()
979 979 for f in list:
980 980 if self.dirstate.state(f) not in 'ai':
981 981 self.ui.warn(_("%s not added!\n") % f)
982 982 else:
983 983 self.dirstate.forget([f])
984 984
985 985 def remove(self, list, unlink=False, wlock=None):
986 986 if unlink:
987 987 for f in list:
988 988 try:
989 989 util.unlink(self.wjoin(f))
990 990 except OSError, inst:
991 991 if inst.errno != errno.ENOENT:
992 992 raise
993 993 if not wlock:
994 994 wlock = self.wlock()
995 995 for f in list:
996 996 if unlink and os.path.exists(self.wjoin(f)):
997 997 self.ui.warn(_("%s still exists!\n") % f)
998 998 elif self.dirstate.state(f) == 'a':
999 999 self.dirstate.forget([f])
1000 1000 elif f not in self.dirstate:
1001 1001 self.ui.warn(_("%s not tracked!\n") % f)
1002 1002 else:
1003 1003 self.dirstate.update([f], "r")
1004 1004
1005 1005 def undelete(self, list, wlock=None):
1006 1006 p = self.dirstate.parents()[0]
1007 1007 mn = self.changelog.read(p)[0]
1008 1008 m = self.manifest.read(mn)
1009 1009 if not wlock:
1010 1010 wlock = self.wlock()
1011 1011 for f in list:
1012 1012 if self.dirstate.state(f) not in "r":
1013 1013 self.ui.warn("%s not removed!\n" % f)
1014 1014 else:
1015 1015 t = self.file(f).read(m[f])
1016 1016 self.wwrite(f, t, m.flags(f))
1017 1017 self.dirstate.update([f], "n")
1018 1018
1019 1019 def copy(self, source, dest, wlock=None):
1020 1020 p = self.wjoin(dest)
1021 1021 if not (os.path.exists(p) or os.path.islink(p)):
1022 1022 self.ui.warn(_("%s does not exist!\n") % dest)
1023 1023 elif not (os.path.isfile(p) or os.path.islink(p)):
1024 1024 self.ui.warn(_("copy failed: %s is not a file or a "
1025 1025 "symbolic link\n") % dest)
1026 1026 else:
1027 1027 if not wlock:
1028 1028 wlock = self.wlock()
1029 1029 if self.dirstate.state(dest) == '?':
1030 1030 self.dirstate.update([dest], "a")
1031 1031 self.dirstate.copy(source, dest)
1032 1032
1033 1033 def heads(self, start=None):
1034 1034 heads = self.changelog.heads(start)
1035 1035 # sort the output in rev descending order
1036 1036 heads = [(-self.changelog.rev(h), h) for h in heads]
1037 1037 heads.sort()
1038 1038 return [n for (r, n) in heads]
1039 1039
    def branchheads(self, branch, start=None):
        """Return the heads of the named branch, as changelog nodes.

        If start is given, only heads reachable from start are kept
        (via nodesbetween).  Unknown branches yield an empty list.
        """
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        # convert the collected revision numbers back to nodes
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1085 1085
1086 1086 def branches(self, nodes):
1087 1087 if not nodes:
1088 1088 nodes = [self.changelog.tip()]
1089 1089 b = []
1090 1090 for n in nodes:
1091 1091 t = n
1092 1092 while 1:
1093 1093 p = self.changelog.parents(n)
1094 1094 if p[1] != nullid or p[0] == nullid:
1095 1095 b.append((t, n, p[0], p[1]))
1096 1096 break
1097 1097 n = p[0]
1098 1098 return b
1099 1099
1100 1100 def between(self, pairs):
1101 1101 r = []
1102 1102
1103 1103 for top, bottom in pairs:
1104 1104 n, l, i = top, [], 0
1105 1105 f = 1
1106 1106
1107 1107 while n != bottom:
1108 1108 p = self.changelog.parents(n)[0]
1109 1109 if i == f:
1110 1110 l.append(n)
1111 1111 f = f * 2
1112 1112 n = p
1113 1113 i += 1
1114 1114
1115 1115 r.append(l)
1116 1116
1117 1117 return r
1118 1118
1119 1119 def findincoming(self, remote, base=None, heads=None, force=False):
1120 1120 """Return list of roots of the subsets of missing nodes from remote
1121 1121
1122 1122 If base dict is specified, assume that these nodes and their parents
1123 1123 exist on the remote side and that no child of a node of base exists
1124 1124 in both remote and self.
1125 1125 Furthermore base will be updated to include the nodes that exists
1126 1126 in self and remote but no children exists in self and remote.
1127 1127 If a list of heads is specified, return only nodes which are heads
1128 1128 or ancestors of these heads.
1129 1129
1130 1130 All the ancestors of base are in self and in remote.
1131 1131 All the descendants of the list returned are missing in self.
1132 1132 (and so we know that the rest of the nodes are missing in remote, see
1133 1133 outgoing)
1134 1134 """
1135 1135 m = self.changelog.nodemap
1136 1136 search = []
1137 1137 fetch = {}
1138 1138 seen = {}
1139 1139 seenbranch = {}
1140 1140 if base == None:
1141 1141 base = {}
1142 1142
1143 1143 if not heads:
1144 1144 heads = remote.heads()
1145 1145
1146 1146 if self.changelog.tip() == nullid:
1147 1147 base[nullid] = 1
1148 1148 if heads != [nullid]:
1149 1149 return [nullid]
1150 1150 return []
1151 1151
1152 1152 # assume we're closer to the tip than the root
1153 1153 # and start by examining the heads
1154 1154 self.ui.status(_("searching for changes\n"))
1155 1155
1156 1156 unknown = []
1157 1157 for h in heads:
1158 1158 if h not in m:
1159 1159 unknown.append(h)
1160 1160 else:
1161 1161 base[h] = 1
1162 1162
1163 1163 if not unknown:
1164 1164 return []
1165 1165
1166 1166 req = dict.fromkeys(unknown)
1167 1167 reqcnt = 0
1168 1168
1169 1169 # search through remote branches
1170 1170 # a 'branch' here is a linear segment of history, with four parts:
1171 1171 # head, root, first parent, second parent
1172 1172 # (a branch always has two parents (or none) by definition)
1173 1173 unknown = remote.branches(unknown)
1174 1174 while unknown:
1175 1175 r = []
1176 1176 while unknown:
1177 1177 n = unknown.pop(0)
1178 1178 if n[0] in seen:
1179 1179 continue
1180 1180
1181 1181 self.ui.debug(_("examining %s:%s\n")
1182 1182 % (short(n[0]), short(n[1])))
1183 1183 if n[0] == nullid: # found the end of the branch
1184 1184 pass
1185 1185 elif n in seenbranch:
1186 1186 self.ui.debug(_("branch already found\n"))
1187 1187 continue
1188 1188 elif n[1] and n[1] in m: # do we know the base?
1189 1189 self.ui.debug(_("found incomplete branch %s:%s\n")
1190 1190 % (short(n[0]), short(n[1])))
1191 1191 search.append(n) # schedule branch range for scanning
1192 1192 seenbranch[n] = 1
1193 1193 else:
1194 1194 if n[1] not in seen and n[1] not in fetch:
1195 1195 if n[2] in m and n[3] in m:
1196 1196 self.ui.debug(_("found new changeset %s\n") %
1197 1197 short(n[1]))
1198 1198 fetch[n[1]] = 1 # earliest unknown
1199 1199 for p in n[2:4]:
1200 1200 if p in m:
1201 1201 base[p] = 1 # latest known
1202 1202
1203 1203 for p in n[2:4]:
1204 1204 if p not in req and p not in m:
1205 1205 r.append(p)
1206 1206 req[p] = 1
1207 1207 seen[n[0]] = 1
1208 1208
1209 1209 if r:
1210 1210 reqcnt += 1
1211 1211 self.ui.debug(_("request %d: %s\n") %
1212 1212 (reqcnt, " ".join(map(short, r))))
1213 1213 for p in xrange(0, len(r), 10):
1214 1214 for b in remote.branches(r[p:p+10]):
1215 1215 self.ui.debug(_("received %s:%s\n") %
1216 1216 (short(b[0]), short(b[1])))
1217 1217 unknown.append(b)
1218 1218
1219 1219 # do binary search on the branches we found
1220 1220 while search:
1221 1221 n = search.pop(0)
1222 1222 reqcnt += 1
1223 1223 l = remote.between([(n[0], n[1])])[0]
1224 1224 l.append(n[1])
1225 1225 p = n[0]
1226 1226 f = 1
1227 1227 for i in l:
1228 1228 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1229 1229 if i in m:
1230 1230 if f <= 2:
1231 1231 self.ui.debug(_("found new branch changeset %s\n") %
1232 1232 short(p))
1233 1233 fetch[p] = 1
1234 1234 base[i] = 1
1235 1235 else:
1236 1236 self.ui.debug(_("narrowed branch search to %s:%s\n")
1237 1237 % (short(p), short(i)))
1238 1238 search.append((p, i))
1239 1239 break
1240 1240 p, f = i, f * 2
1241 1241
1242 1242 # sanity check our fetch list
1243 1243 for f in fetch.keys():
1244 1244 if f in m:
1245 1245 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1246 1246
1247 1247 if base.keys() == [nullid]:
1248 1248 if force:
1249 1249 self.ui.warn(_("warning: repository is unrelated\n"))
1250 1250 else:
1251 1251 raise util.Abort(_("repository is unrelated"))
1252 1252
1253 1253 self.ui.debug(_("found new changesets starting at ") +
1254 1254 " ".join([short(f) for f in fetch]) + "\n")
1255 1255
1256 1256 self.ui.debug(_("%d total queries\n") % reqcnt)
1257 1257
1258 1258 return fetch.keys()
1259 1259
1260 1260 def findoutgoing(self, remote, base=None, heads=None, force=False):
1261 1261 """Return list of nodes that are roots of subsets not in remote
1262 1262
1263 1263 If base dict is specified, assume that these nodes and their parents
1264 1264 exist on the remote side.
1265 1265 If a list of heads is specified, return only nodes which are heads
1266 1266 or ancestors of these heads, and return a second element which
1267 1267 contains all remote heads which get new children.
1268 1268 """
1269 1269 if base == None:
1270 1270 base = {}
1271 1271 self.findincoming(remote, base, heads, force=force)
1272 1272
1273 1273 self.ui.debug(_("common changesets up to ")
1274 1274 + " ".join(map(short, base.keys())) + "\n")
1275 1275
1276 1276 remain = dict.fromkeys(self.changelog.nodemap)
1277 1277
1278 1278 # prune everything remote has from the tree
1279 1279 del remain[nullid]
1280 1280 remove = base.keys()
1281 1281 while remove:
1282 1282 n = remove.pop(0)
1283 1283 if n in remain:
1284 1284 del remain[n]
1285 1285 for p in self.changelog.parents(n):
1286 1286 remove.append(p)
1287 1287
1288 1288 # find every node whose parents have been pruned
1289 1289 subset = []
1290 1290 # find every remote head that will get new children
1291 1291 updated_heads = {}
1292 1292 for n in remain:
1293 1293 p1, p2 = self.changelog.parents(n)
1294 1294 if p1 not in remain and p2 not in remain:
1295 1295 subset.append(n)
1296 1296 if heads:
1297 1297 if p1 in heads:
1298 1298 updated_heads[p1] = True
1299 1299 if p2 in heads:
1300 1300 updated_heads[p2] = True
1301 1301
1302 1302 # this is the set of all roots we have to push
1303 1303 if heads:
1304 1304 return subset, updated_heads.keys()
1305 1305 else:
1306 1306 return subset
1307 1307
1308 1308 def pull(self, remote, heads=None, force=False, lock=None):
1309 1309 mylock = False
1310 1310 if not lock:
1311 1311 lock = self.lock()
1312 1312 mylock = True
1313 1313
1314 1314 try:
1315 fetch = self.findincoming(remote, force=force)
1315 fetch = self.findincoming(remote, heads=heads, force=force)
1316 1316 if fetch == [nullid]:
1317 1317 self.ui.status(_("requesting all changes\n"))
1318 1318
1319 1319 if not fetch:
1320 1320 self.ui.status(_("no changes found\n"))
1321 1321 return 0
1322 1322
1323 1323 if heads is None:
1324 1324 cg = remote.changegroup(fetch, 'pull')
1325 1325 else:
1326 1326 if 'changegroupsubset' not in remote.capabilities:
1327 1327 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1328 1328 cg = remote.changegroupsubset(fetch, heads, 'pull')
1329 1329 return self.addchangegroup(cg, 'pull', remote.url())
1330 1330 finally:
1331 1331 if mylock:
1332 1332 lock.release()
1333 1333
1334 1334 def push(self, remote, force=False, revs=None):
1335 1335 # there are two ways to push to remote repo:
1336 1336 #
1337 1337 # addchangegroup assumes local user can lock remote
1338 1338 # repo (local filesystem, old ssh servers).
1339 1339 #
1340 1340 # unbundle assumes local user cannot lock remote repo (new ssh
1341 1341 # servers, http servers).
1342 1342
1343 1343 if remote.capable('unbundle'):
1344 1344 return self.push_unbundle(remote, force, revs)
1345 1345 return self.push_addchangegroup(remote, force, revs)
1346 1346
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns a (changegroup, remote_heads) pair, or (None, 1) when
        there is nothing to push or when the push would create new
        remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # inc is non-empty if the remote has changes we don't have
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # remote is empty: any push is fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # r survives as a head only if none of our
                        # outgoing heads descend from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1402 1402
1403 1403 def push_addchangegroup(self, remote, force, revs):
1404 1404 lock = remote.lock()
1405 1405
1406 1406 ret = self.prepush(remote, force, revs)
1407 1407 if ret[0] is not None:
1408 1408 cg, remote_heads = ret
1409 1409 return remote.addchangegroup(cg, 'push', self.url())
1410 1410 return ret[1]
1411 1411
1412 1412 def push_unbundle(self, remote, force, revs):
1413 1413 # local repo finds heads on server, finds out what revs it
1414 1414 # must push. once revs transferred, if server finds it has
1415 1415 # different heads (someone else won commit/push race), server
1416 1416 # aborts.
1417 1417
1418 1418 ret = self.prepush(remote, force, revs)
1419 1419 if ret[0] is not None:
1420 1420 cg, remote_heads = ret
1421 1421 if force: remote_heads = ['force']
1422 1422 return remote.unbundle(cg, remote_heads, 'push')
1423 1423 return ret[1]
1424 1424
1425 1425 def changegroupinfo(self, nodes):
1426 1426 self.ui.note(_("%d changesets found\n") % len(nodes))
1427 1427 if self.ui.debugflag:
1428 1428 self.ui.debug(_("List of changesets:\n"))
1429 1429 for node in nodes:
1430 1430 self.ui.debug("%s\n" % hex(node))
1431 1431
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        source is an opaque tag passed to the pre/post-outgoing hooks
        (e.g. 'pull' or 'push').  Returns a chunkbuffer over the
        generated group.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1703 1703
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        source is an opaque tag passed to the pre/post-outgoing hooks.
        Returns a chunkbuffer over the generated group.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # all changesets descending from any of the bases
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revs going out, used to filter the
        # manifest and file revlogs by linkrev below
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changeset 'owns' itself, so node lookup is identity
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is going out
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # record every filename touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node back to its owning changeset node
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # file groups are prefixed with their filename
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1770 1770
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source is a chunked stream as produced by changegroup();
        srctype (e.g. 'push', 'pull') and url are passed through to the
        hooks fired along the way.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # linkrev callback for the changelog: each incoming changeset
            # links to the revision number it is about to occupy
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # linkrev callback for manifests/filelogs: map a changelog
            # node (already added above) to its revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1  # last revision number before the group
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1  # last revision number after the group
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # each file group is preceded by a chunk carrying the file
            # name; an empty chunk terminates the stream
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # fired before the transaction commits; a throwing hook can
            # still veto the whole changegroup.  cor+1 is the first new
            # changeset.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # post-commit notification hooks
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1867 1867
1868 1868
1869 1869 def stream_in(self, remote):
1870 1870 fp = remote.stream_out()
1871 1871 l = fp.readline()
1872 1872 try:
1873 1873 resp = int(l)
1874 1874 except ValueError:
1875 1875 raise util.UnexpectedOutput(
1876 1876 _('Unexpected response from remote server:'), l)
1877 1877 if resp == 1:
1878 1878 raise util.Abort(_('operation forbidden by server'))
1879 1879 elif resp == 2:
1880 1880 raise util.Abort(_('locking the remote repository failed'))
1881 1881 elif resp != 0:
1882 1882 raise util.Abort(_('the server sent an unknown error code'))
1883 1883 self.ui.status(_('streaming all changes\n'))
1884 1884 l = fp.readline()
1885 1885 try:
1886 1886 total_files, total_bytes = map(int, l.split(' ', 1))
1887 1887 except ValueError, TypeError:
1888 1888 raise util.UnexpectedOutput(
1889 1889 _('Unexpected response from remote server:'), l)
1890 1890 self.ui.status(_('%d files to transfer, %s of data\n') %
1891 1891 (total_files, util.bytecount(total_bytes)))
1892 1892 start = time.time()
1893 1893 for i in xrange(total_files):
1894 1894 # XXX doesn't support '\n' or '\r' in filenames
1895 1895 l = fp.readline()
1896 1896 try:
1897 1897 name, size = l.split('\0', 1)
1898 1898 size = int(size)
1899 1899 except ValueError, TypeError:
1900 1900 raise util.UnexpectedOutput(
1901 1901 _('Unexpected response from remote server:'), l)
1902 1902 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1903 1903 ofp = self.sopener(name, 'w')
1904 1904 for chunk in util.filechunkiter(fp, limit=size):
1905 1905 ofp.write(chunk)
1906 1906 ofp.close()
1907 1907 elapsed = time.time() - start
1908 1908 if elapsed <= 0:
1909 1909 elapsed = 0.001
1910 1910 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1911 1911 (util.bytecount(total_bytes), elapsed,
1912 1912 util.bytecount(total_bytes / elapsed)))
1913 1913 self.invalidate()
1914 1914 return len(self.heads()) + 1
1915 1915
1916 1916 def clone(self, remote, heads=[], stream=False):
1917 1917 '''clone remote repository.
1918 1918
1919 1919 keyword arguments:
1920 1920 heads: list of revs to clone (forces use of pull)
1921 1921 stream: use streaming clone if possible'''
1922 1922
1923 1923 # now, all clients that can request uncompressed clones can
1924 1924 # read repo formats supported by all servers that can serve
1925 1925 # them.
1926 1926
1927 1927 # if revlog format changes, client will have to check version
1928 1928 # and format flags on "stream" capability, and use
1929 1929 # uncompressed only if compatible.
1930 1930
1931 1931 if stream and not heads and remote.capable('stream'):
1932 1932 return self.stream_in(remote)
1933 1933 return self.pull(remote, heads)
1934 1934
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The (src, dest) pairs are snapshotted into plain tuples up front and
    captured by a bare closure, so the callback holds no reference back
    to the repository object.
    """
    pending = [tuple(pair) for pair in files]
    def renameall():
        for source, target in pending:
            util.rename(source, target)
    return renameall
1942 1942
def instance(ui, path, create):
    """Open (or create, when create is true) the local repository at path,
    after stripping any leading 'file:' scheme from the path."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1945 1945
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now