##// END OF EJS Templates
Changeset description: branch.cache: silently ignore I/O and OS errors
Author: Matt Mackall
Revision: r4415:1a63b44f (branch: default)
@@ -1,2016 +1,2020 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
class localrepository(repo.repository):
    # wire-protocol capabilities advertised to clients
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk requirements this version of the code understands
    supported = ('revlogv1', 'store')
20 20
    def __del__(self):
        # drop the transaction handle on teardown; presumably breaks the
        # repo <-> transaction reference cycle — TODO confirm
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is None, search upward from the CWD for a '.hg'
        directory.  Raises repo.RepoError when no repository is found,
        when create is requested over an existing repository, or when
        the on-disk 'requires' file lists an unsupported requirement.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up the directory tree looking for .hg
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing 'requires' file means an old-format repo
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the 'store' requirement, revlog data lives in
        # .hg/store with encoded filenames; otherwise directly under .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # per-repo hgrc is optional
            pass

        # determine the revlog version/flags to use for new revlogs
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not loaded yet"
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127 127
128 128 def url(self):
129 129 return 'file:' + self.root
130 130
    def hook(self, name, throw=False, **args):
        """Run every configured hook whose key matches `name`.

        Hooks come from the [hooks] config section; 'python:' entries are
        imported and called, anything else runs as a shell command.
        Returns the OR of all hook results; with throw=True a failing
        hook raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: pass hook args through the environment as HG_*
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # "name" and "name.suffix" entries both match; run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
211 211
    # characters never allowed in a tag name
    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to tag when .hgtags itself has uncommitted changes,
        # since we are about to modify and auto-commit it
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
257 257
258 258 def tags(self):
259 259 '''return a mapping of tag to node'''
260 260 if self.tagscache:
261 261 return self.tagscache
262 262
263 263 globaltags = {}
264 264
265 265 def readtags(lines, fn):
266 266 filetags = {}
267 267 count = 0
268 268
269 269 def warn(msg):
270 270 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
271 271
272 272 for l in lines:
273 273 count += 1
274 274 if not l:
275 275 continue
276 276 s = l.split(" ", 1)
277 277 if len(s) != 2:
278 278 warn(_("cannot parse entry"))
279 279 continue
280 280 node, key = s
281 281 key = util.tolocal(key.strip()) # stored in UTF-8
282 282 try:
283 283 bin_n = bin(node)
284 284 except TypeError:
285 285 warn(_("node '%s' is not well formed") % node)
286 286 continue
287 287 if bin_n not in self.changelog.nodemap:
288 288 warn(_("tag '%s' refers to unknown node") % key)
289 289 continue
290 290
291 291 h = []
292 292 if key in filetags:
293 293 n, h = filetags[key]
294 294 h.append(n)
295 295 filetags[key] = (bin_n, h)
296 296
297 297 for k,nh in filetags.items():
298 298 if k not in globaltags:
299 299 globaltags[k] = nh
300 300 continue
301 301 # we prefer the global tag if:
302 302 # it supercedes us OR
303 303 # mutual supercedes and it has a higher rank
304 304 # otherwise we win because we're tip-most
305 305 an, ah = nh
306 306 bn, bh = globaltags[k]
307 307 if bn != an and an in bh and \
308 308 (bn not in ah or len(bh) > len(ah)):
309 309 an = bn
310 310 ah.append([n for n in bh if n not in ah])
311 311 globaltags[k] = an, ah
312 312
313 313 # read the tags file from each head, ending with the tip
314 314 f = None
315 315 for rev, node, fnode in self._hgtagsnodes():
316 316 f = (f and f.filectx(fnode) or
317 317 self.filectx('.hgtags', fileid=fnode))
318 318 readtags(f.data().splitlines(), f)
319 319
320 320 try:
321 321 data = util.fromlocal(self.opener("localtags").read())
322 322 # localtags are stored in the local character set
323 323 # while the internal tag table is stored in UTF-8
324 324 readtags(data.splitlines(), "localtags")
325 325 except IOError:
326 326 pass
327 327
328 328 self.tagscache = {}
329 329 for k,nh in globaltags.items():
330 330 n = nh[0]
331 331 if n != nullid:
332 332 self.tagscache[k] = n
333 333 self.tagscache['tip'] = self.changelog.tip()
334 334
335 335 return self.tagscache
336 336
    def _hgtagsnodes(self):
        # Yield (rev, node, .hgtags-filenode) for each head, oldest first
        # and ending with the tip.  When several heads share the same
        # .hgtags filenode, only the last occurrence is kept so each
        # distinct .hgtags version is read exactly once.
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except repo.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # null out the earlier duplicate; filtered below
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
354 354
355 355 def tagslist(self):
356 356 '''return a list of tags ordered by revision'''
357 357 l = []
358 358 for t, n in self.tags().items():
359 359 try:
360 360 r = self.changelog.rev(n)
361 361 except:
362 362 r = -2 # sort to the beginning of the list if unknown
363 363 l.append((r, t, n))
364 364 l.sort()
365 365 return [(t, n) for r, t, n in l]
366 366
367 367 def nodetags(self, node):
368 368 '''return the tags associated with a node'''
369 369 if not self.nodetagscache:
370 370 self.nodetagscache = {}
371 371 for t, n in self.tags().items():
372 372 self.nodetagscache.setdefault(n, []).append(t)
373 373 return self.nodetagscache.get(node, [])
374 374
    def _branchtags(self):
        # Load the on-disk branch cache, bring it up to date with any
        # revisions added since it was written, and persist the refresh.
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
384 384
    def branchtags(self):
        """Return a map of branch name (local charset) -> tip-most node,
        cached in self.branchcache."""
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
397 397
    def _readbranchcache(self):
        """Read .hg/branch.cache.

        Returns (partial, last, lrev) where partial maps branch name ->
        node (UTF-8), and last/lrev identify the tip the cache was valid
        for.  Any problem yields an empty cache rather than an error.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # missing or unreadable cache: start from scratch, silently
            return {}, nullid, nullrev

        try:
            # first line is "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any parse error just discards the cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
421 425
    def _writebranchcache(self, branches, tip, tiprev):
        # Best-effort write of .hg/branch.cache: a read-only or shared
        # repository must not make the caller fail, so I/O and OS errors
        # are deliberately ignored.
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            # header line records the tip the cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
431 435
432 436 def _updatebranchcache(self, partial, start, end):
433 437 for r in xrange(start, end):
434 438 c = self.changectx(r)
435 439 b = c.branch()
436 440 partial[b] = c.node()
437 441
    def lookup(self, key):
        """Resolve a revision symbol to a changelog node.

        Order: '.' (first working parent), 'null', exact rev/node match,
        tag names, branch names, then unambiguous node-prefix match.
        Raises repo.RepoError if nothing matches.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
456 460
457 461 def dev(self):
458 462 return os.lstat(self.path).st_dev
459 463
    def local(self):
        # this is an on-disk repository; presumably remote repo classes
        # implementing the same interface return False — TODO confirm
        return True
462 466
    def join(self, f):
        # path of f inside the .hg administrative directory
        return os.path.join(self.path, f)
465 469
466 470 def sjoin(self, f):
467 471 f = self.encodefn(f)
468 472 return os.path.join(self.spath, f)
469 473
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
472 476
473 477 def file(self, f):
474 478 if f[0] == '/':
475 479 f = f[1:]
476 480 return filelog.filelog(self.sopener, f, self.revlogversion)
477 481
    def changectx(self, changeid=None):
        # context object for a changeset; interpretation of changeid
        # (including None) is delegated to context.changectx
        return context.changectx(self, changeid)
480 484
    def workingctx(self):
        # context object for the working directory
        return context.workingctx(self)
483 487
484 488 def parents(self, changeid=None):
485 489 '''
486 490 get list of changectxs for parents of changeid or working directory
487 491 '''
488 492 if changeid is None:
489 493 pl = self.dirstate.parents()
490 494 else:
491 495 n = self.changelog.lookup(changeid)
492 496 pl = self.changelog.parents(n)
493 497 if pl[1] == nullid:
494 498 return [self.changectx(pl[0])]
495 499 return [self.changectx(pl[0]), self.changectx(pl[1])]
496 500
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        # interpretation of the ids is delegated to context.filectx
        return context.filectx(self, path, changeid, fileid)
501 505
    def getcwd(self):
        # current working directory, as tracked by the dirstate
        return self.dirstate.getcwd()
504 508
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
507 511
    def wread(self, filename):
        """Read a working-directory file, applying the first matching
        [encode] filter from the configuration."""
        if self.encodepats == None:
            # build and cache the (matcher, command) filter list once
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
525 529
    def wwrite(self, filename, data, fd=None):
        """Write data to a working-directory file (or an already-open fd),
        applying the first matching [decode] filter."""
        if self.decodepats == None:
            # build and cache the (matcher, command) filter list once
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
543 547
    def transaction(self):
        """Return a (possibly nested) transaction for this repository,
        after journaling the dirstate for rollback."""
        tr = self.transhandle
        if tr != None and tr.running():
            # a transaction is already running: join it
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repo)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # on close, journal files are renamed to undo files for rollback
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
563 567
    def recover(self):
        """Roll back an interrupted transaction if a journal exists.
        Returns True when a rollback happened, False otherwise."""
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # in-memory state is now stale; re-read it
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
574 578
    def rollback(self, wlock=None):
        """Undo the last committed transaction using the undo files,
        restoring the saved dirstate as well."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # in-memory state is now stale; re-read it
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
587 591
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
590 594
591 595 def reload(self):
592 596 self.changelog.load()
593 597 self.manifest.load()
594 598 self.tagscache = None
595 599 self.nodetagscache = None
596 600
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock.  On contention, raise immediately if
        wait is false; otherwise warn and retry with the configured
        ui.timeout (600s default).  acquirefn runs after acquisition."""
        try:
            # first try without blocking
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
612 616
    def lock(self, wait=1):
        # store lock; reload() on acquire so we see others' changes
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
616 620
    def wlock(self, wait=1):
        # working-directory lock; writes the dirstate on release and
        # re-reads it on acquire
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
621 625
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filenode (or the existing one when the file is
        unmodified).  Appends fn to changelist only when a new revision
        is actually created.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
681 685
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        # commit with explicitly supplied parents, bypassing the
        # dirstate-driven file selection of commit()
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
687 691
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node (or None when
        nothing changed / the commit message was empty).

        With p1 set this is a raw commit (parents given explicitly);
        otherwise the dirstate decides what to commit.  Note the mutable
        default for extra is safe: it is copied before any mutation.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # explicit file list: classify each by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                # no list given: commit everything modified/added/removed
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            # an empty commit is allowed if it changes the branch name
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: treat an unreadable file as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # assemble the HG: template and invoke the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
827 831
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # break right after del: safe despite mutating
                        # the dict we are iterating
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was not found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.root, self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
861 865
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean), each a sorted list of file names.
        """

        def fcmp(fn, mf):
            # compare working-file contents against the manifest entry
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    # best effort only: proceed unlocked if the lock is
                    # held, just skipping the dirstate fixups below
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the file as clean in the dirstate
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # empty mf2[fn] marks a working-dir file: compare
                    # contents only when the recorded nodes differ
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 is absent from mf2, hence removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
958 962
959 963 def add(self, list, wlock=None):
960 964 if not wlock:
961 965 wlock = self.wlock()
962 966 for f in list:
963 967 p = self.wjoin(f)
964 968 if not os.path.exists(p):
965 969 self.ui.warn(_("%s does not exist!\n") % f)
966 970 elif not os.path.isfile(p):
967 971 self.ui.warn(_("%s not added: only files supported currently\n")
968 972 % f)
969 973 elif self.dirstate.state(f) in 'an':
970 974 self.ui.warn(_("%s already tracked!\n") % f)
971 975 else:
972 976 self.dirstate.update([f], "a")
973 977
974 978 def forget(self, list, wlock=None):
975 979 if not wlock:
976 980 wlock = self.wlock()
977 981 for f in list:
978 982 if self.dirstate.state(f) not in 'ai':
979 983 self.ui.warn(_("%s not added!\n") % f)
980 984 else:
981 985 self.dirstate.forget([f])
982 986
    def remove(self, list, unlink=False, wlock=None):
        """Mark the given files as removed ('r') in the dirstate.

        If unlink is true, delete the working copies first; a file that
        is already gone (ENOENT) is silently ignored, any other OSError
        propagates.  A file still present in the working dir is refused
        with a warning; a file that was only scheduled for add is simply
        forgotten; an untracked file gets a warning.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already missing is fine; anything else is real
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # refuse to record removal while the file still exists
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
1003 1007
1004 1008 def undelete(self, list, wlock=None):
1005 1009 p = self.dirstate.parents()[0]
1006 1010 mn = self.changelog.read(p)[0]
1007 1011 m = self.manifest.read(mn)
1008 1012 if not wlock:
1009 1013 wlock = self.wlock()
1010 1014 for f in list:
1011 1015 if self.dirstate.state(f) not in "r":
1012 1016 self.ui.warn("%s not removed!\n" % f)
1013 1017 else:
1014 1018 t = self.file(f).read(m[f])
1015 1019 self.wwrite(f, t)
1016 1020 util.set_exec(self.wjoin(f), m.execf(f))
1017 1021 self.dirstate.update([f], "n")
1018 1022
1019 1023 def copy(self, source, dest, wlock=None):
1020 1024 p = self.wjoin(dest)
1021 1025 if not os.path.exists(p):
1022 1026 self.ui.warn(_("%s does not exist!\n") % dest)
1023 1027 elif not os.path.isfile(p):
1024 1028 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1025 1029 else:
1026 1030 if not wlock:
1027 1031 wlock = self.wlock()
1028 1032 if self.dirstate.state(dest) == '?':
1029 1033 self.dirstate.update([dest], "a")
1030 1034 self.dirstate.copy(source, dest)
1031 1035
1032 1036 def heads(self, start=None):
1033 1037 heads = self.changelog.heads(start)
1034 1038 # sort the output in rev descending order
1035 1039 heads = [(-self.changelog.rev(h), h) for h in heads]
1036 1040 heads.sort()
1037 1041 return [n for (r, n) in heads]
1038 1042
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a traversal queued at a merge's second parent,
                # carrying over the tags found so far on that path
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # tag/head collected so far on this path
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the branch we were asked about: stop
                        # descending this path
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of tagged nodes reachable from node
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1144 1148
1145 1149 def branches(self, nodes):
1146 1150 if not nodes:
1147 1151 nodes = [self.changelog.tip()]
1148 1152 b = []
1149 1153 for n in nodes:
1150 1154 t = n
1151 1155 while 1:
1152 1156 p = self.changelog.parents(n)
1153 1157 if p[1] != nullid or p[0] == nullid:
1154 1158 b.append((t, n, p[0], p[1]))
1155 1159 break
1156 1160 n = p[0]
1157 1161 return b
1158 1162
1159 1163 def between(self, pairs):
1160 1164 r = []
1161 1165
1162 1166 for top, bottom in pairs:
1163 1167 n, l, i = top, [], 0
1164 1168 f = 1
1165 1169
1166 1170 while n != bottom:
1167 1171 p = self.changelog.parents(n)[0]
1168 1172 if i == f:
1169 1173 l.append(n)
1170 1174 f = f * 2
1171 1175 n = p
1172 1176 i += 1
1173 1177
1174 1178 r.append(l)
1175 1179
1176 1180 return r
1177 1181
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # nodes we have already asked (or will ask) the remote about
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next remote request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # batch parent lookups, ten branches per request
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap is small enough: p is the first unknown node
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # no common ancestor besides the null revision
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1318 1322
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info supplied: compute it via discovery
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1366 1370
1367 1371 def pull(self, remote, heads=None, force=False, lock=None):
1368 1372 mylock = False
1369 1373 if not lock:
1370 1374 lock = self.lock()
1371 1375 mylock = True
1372 1376
1373 1377 try:
1374 1378 fetch = self.findincoming(remote, force=force)
1375 1379 if fetch == [nullid]:
1376 1380 self.ui.status(_("requesting all changes\n"))
1377 1381
1378 1382 if not fetch:
1379 1383 self.ui.status(_("no changes found\n"))
1380 1384 return 0
1381 1385
1382 1386 if heads is None:
1383 1387 cg = remote.changegroup(fetch, 'pull')
1384 1388 else:
1385 1389 if 'changegroupsubset' not in remote.capabilities:
1386 1390 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1387 1391 cg = remote.changegroupsubset(fetch, heads, 'pull')
1388 1392 return self.addchangegroup(cg, 'pull', remote.url())
1389 1393 finally:
1390 1394 if mylock:
1391 1395 lock.release()
1392 1396
1393 1397 def push(self, remote, force=False, revs=None):
1394 1398 # there are two ways to push to remote repo:
1395 1399 #
1396 1400 # addchangegroup assumes local user can lock remote
1397 1401 # repo (local filesystem, old ssh servers).
1398 1402 #
1399 1403 # unbundle assumes local user cannot lock remote repo (new ssh
1400 1404 # servers, http servers).
1401 1405
1402 1406 if remote.capable('unbundle'):
1403 1407 return self.push_unbundle(remote, force, revs)
1404 1408 return self.push_addchangegroup(remote, force, revs)
1405 1409
    def prepush(self, remote, force, revs):
        """Analyse what would be pushed and build the changegroup.

        Returns (changegroup, remote heads) when there is something to
        send, or (None, status) when there is nothing to push or the
        push would create new remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: cannot create extra heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # count the heads the remote would end up with
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # r stays a head: no outgoing head descends from it
                            newheads.append(r)
                    else:
                        # r is unknown locally, so it stays a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1461 1465
1462 1466 def push_addchangegroup(self, remote, force, revs):
1463 1467 lock = remote.lock()
1464 1468
1465 1469 ret = self.prepush(remote, force, revs)
1466 1470 if ret[0] is not None:
1467 1471 cg, remote_heads = ret
1468 1472 return remote.addchangegroup(cg, 'push', self.url())
1469 1473 return ret[1]
1470 1474
1471 1475 def push_unbundle(self, remote, force, revs):
1472 1476 # local repo finds heads on server, finds out what revs it
1473 1477 # must push. once revs transferred, if server finds it has
1474 1478 # different heads (someone else won commit/push race), server
1475 1479 # aborts.
1476 1480
1477 1481 ret = self.prepush(remote, force, revs)
1478 1482 if ret[0] is not None:
1479 1483 cg, remote_heads = ret
1480 1484 if force: remote_heads = ['force']
1481 1485 return remote.unbundle(cg, remote_heads, 'push')
1482 1486 return ret[1]
1483 1487
1484 1488 def changegroupinfo(self, nodes):
1485 1489 self.ui.note(_("%d changesets found\n") % len(nodes))
1486 1490 if self.ui.debugflag:
1487 1491 self.ui.debug(_("List of changesets:\n"))
1488 1492 for node in nodes:
1489 1493 self.ui.debug("%s\n" % hex(node))
1490 1494
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer wrapping the generated group.  Fires
        the 'preoutgoing' hook up front and 'outgoing' once the set of
        missing changesets is known."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1762 1766
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the generated group.  Fires
        the 'preoutgoing' hook up front and 'outgoing' when there are
        nodes to send."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revs being sent, for linkrev tests
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is outgoing
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # build a callback that records every file touched by a changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # build a lookup mapping a revlog node to its owning changenode
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1829 1833
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: stream of changegroup chunks (changelog, manifest, files).
        srctype/url: passed through to the pre/post hooks.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # link mapper for the changelog: log progress and hand back the
            # revision number the incoming changeset will receive
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # link mapper for manifest/file revlogs: changelog node -> rev
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: highest changelog rev before/after the group is added
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            # file groups follow until an empty chunk terminates the stream
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the appendfile buffers into the real changelog files
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # throw=True and running before tr.close(): a failing hook
            # aborts the whole transaction
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook call per new changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1936 1940
1937 1941
1938 1942 def stream_in(self, remote):
1939 1943 fp = remote.stream_out()
1940 1944 l = fp.readline()
1941 1945 try:
1942 1946 resp = int(l)
1943 1947 except ValueError:
1944 1948 raise util.UnexpectedOutput(
1945 1949 _('Unexpected response from remote server:'), l)
1946 1950 if resp == 1:
1947 1951 raise util.Abort(_('operation forbidden by server'))
1948 1952 elif resp == 2:
1949 1953 raise util.Abort(_('locking the remote repository failed'))
1950 1954 elif resp != 0:
1951 1955 raise util.Abort(_('the server sent an unknown error code'))
1952 1956 self.ui.status(_('streaming all changes\n'))
1953 1957 l = fp.readline()
1954 1958 try:
1955 1959 total_files, total_bytes = map(int, l.split(' ', 1))
1956 1960 except ValueError, TypeError:
1957 1961 raise util.UnexpectedOutput(
1958 1962 _('Unexpected response from remote server:'), l)
1959 1963 self.ui.status(_('%d files to transfer, %s of data\n') %
1960 1964 (total_files, util.bytecount(total_bytes)))
1961 1965 start = time.time()
1962 1966 for i in xrange(total_files):
1963 1967 # XXX doesn't support '\n' or '\r' in filenames
1964 1968 l = fp.readline()
1965 1969 try:
1966 1970 name, size = l.split('\0', 1)
1967 1971 size = int(size)
1968 1972 except ValueError, TypeError:
1969 1973 raise util.UnexpectedOutput(
1970 1974 _('Unexpected response from remote server:'), l)
1971 1975 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1972 1976 ofp = self.sopener(name, 'w')
1973 1977 for chunk in util.filechunkiter(fp, limit=size):
1974 1978 ofp.write(chunk)
1975 1979 ofp.close()
1976 1980 elapsed = time.time() - start
1977 1981 if elapsed <= 0:
1978 1982 elapsed = 0.001
1979 1983 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1980 1984 (util.bytecount(total_bytes), elapsed,
1981 1985 util.bytecount(total_bytes / elapsed)))
1982 1986 self.reload()
1983 1987 return len(self.heads()) + 1
1984 1988
1985 1989 def clone(self, remote, heads=[], stream=False):
1986 1990 '''clone remote repository.
1987 1991
1988 1992 keyword arguments:
1989 1993 heads: list of revs to clone (forces use of pull)
1990 1994 stream: use streaming clone if possible'''
1991 1995
1992 1996 # now, all clients that can request uncompressed clones can
1993 1997 # read repo formats supported by all servers that can serve
1994 1998 # them.
1995 1999
1996 2000 # if revlog format changes, client will have to check version
1997 2001 # and format flags on "stream" capability, and use
1998 2002 # uncompressed only if compatible.
1999 2003
2000 2004 if stream and not heads and remote.capable('stream'):
2001 2005 return self.stream_in(remote)
2002 2006 return self.pull(remote, heads)
2003 2007
2004 2008 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the given renames.

    Snapshots the (src, dest) pairs immediately so later mutation of
    'files' has no effect, and holds no reference back to the repo --
    used to avoid circular references so destructors work.
    """
    pending = [tuple(pair) for pair in files]
    def a():
        for src, dest in pending:
            util.rename(src, dest)
    return a
2011 2015
def instance(ui, path, create):
    """Open (or create, when 'create' is set) the repository at 'path'.

    A leading 'file:' scheme is stripped from 'path' before it is handed
    to localrepository.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2014 2018
def islocal(path):
    """Report whether this repository type is local.

    'path' is accepted for interface compatibility with other repo
    classes and ignored: repositories handled by this module always
    live on the local filesystem.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now