##// END OF EJS Templates
Break core of repo.tag into dirstate/hook-free repo._tag for convert-repo
Brendan Cully -
r4118:35b39097 default
parent child Browse files
Show More
@@ -1,1907 +1,1920 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, appendfile, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
    def __del__(self):
        # Drop the reference to any transaction object at teardown so it
        # can be collected along with the repository.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is None, walk upward from the current directory until a
        ".hg" directory is found.  Raises repo.RepoError if no repository
        is found, if a repository to be created already exists, or if the
        repository declares a requirement this version does not support.
        """
        repo.repository.__init__(self)
        if not path:
            # search upward for the enclosing repository root
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)      # opens files under .hg
        self.wopener = util.opener(self.root)     # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means an old-style repo; any
                # other I/O error is fatal
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            # old layout: store files live directly in .hg, unencoded
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is fine
            pass

        # derive the revlog version/flags from configuration
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches, invalidated by reload()
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        # _link(f) tells whether working-dir file f is a symlink; on
        # platforms without symlink support it is always False
        self._link = lambda x: False
        if util.checklink(self.root):
            r = self.root # avoid circular reference in lambda
            self._link = lambda x: util.is_link(os.path.join(r, x))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129 129
130 130 def url(self):
131 131 return 'file:' + self.root
132 132
    def hook(self, name, throw=False, **args):
        """Run all hooks configured under [hooks] whose name (before any
        '.suffix') matches `name`.

        Each extra keyword argument is exported to shell hooks as an
        HG_<KEY> environment variable and passed through to Python hooks.
        Returns the combined failure status of all hooks run; if `throw`
        is true, the first failing hook raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            obj = funcname
            if not callable(obj):
                # resolve a dotted "module.attr" string to a callable
                d = funcname.rfind('.')
                if d == -1:
                    raise util.Abort(_('%s hook is invalid ("%s" not in '
                                       'a module)') % (hname, funcname))
                modname = funcname[:d]
                try:
                    obj = __import__(modname)
                except ImportError:
                    try:
                        # extensions are loaded with hgext_ prefix
                        obj = __import__("hgext_%s" % modname)
                    except ImportError:
                        raise util.Abort(_('%s hook is invalid '
                                           '(import of "%s" failed)') %
                                         (hname, modname))
                try:
                    for p in funcname.split('.')[1:]:
                        obj = getattr(obj, p)
                except AttributeError, err:
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not defined)') %
                                     (hname, funcname))
                if not callable(obj):
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not callable)') %
                                     (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run an external (shell) hook; non-zero exit means failure
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # run matching hooks in sorted (deterministic) order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if callable(cmd):
                r = callhook(hname, cmd) or r
            elif cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
217 217
218 218 tag_disallowed = ':\r\n'
219 219
    def _tag(self, name, node, message, local, user, date, parent=None):
        """Core tagging logic, without working-copy checks.

        If parent is None, the tag is written via the dirstate (normal
        working-copy operation); otherwise .hgtags content is taken from
        the given parent revision and the commit is made against it —
        this lets callers such as convert-repo tag without a dirstate.
        Runs the pretag/tag hooks.  Returns the node of the tag commit,
        or None for a local tag.
        """
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        if use_dirstate:
            self.wfile('.hgtags', 'ab').write(line)
        else:
            # base the new .hgtags on the chosen parent's copy
            ntags = self.filectx('.hgtags', parent).data()
            self.wfile('.hgtags', 'ab').write(ntags + line)
        if use_dirstate and self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
250
220 251 def tag(self, name, node, message, local, user, date):
221 252 '''tag a revision with a symbolic name.
222 253
223 254 if local is True, the tag is stored in a per-repository file.
224 255 otherwise, it is stored in the .hgtags file, and a new
225 256 changeset is committed with the change.
226 257
227 258 keyword arguments:
228 259
229 260 local: whether to store tag in non-version-controlled file
230 261 (default False)
231 262
232 263 message: commit message to use if committing
233 264
234 265 user: name of user to use if committing
235 266
236 267 date: date tuple to use if committing'''
237 268
238 for c in self.tag_disallowed:
239 if c in name:
240 raise util.Abort(_('%r cannot be used in a tag name') % c)
241
242 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
243
244 if local:
245 # local tags are stored in the current charset
246 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
247 self.hook('tag', node=hex(node), tag=name, local=local)
248 return
249
250 269 for x in self.status()[:5]:
251 270 if '.hgtags' in x:
252 271 raise util.Abort(_('working copy of .hgtags is changed '
253 272 '(please commit .hgtags manually)'))
254 273
255 # committed tags are stored in UTF-8
256 line = '%s %s\n' % (hex(node), util.fromlocal(name))
257 self.wfile('.hgtags', 'ab').write(line)
258 if self.dirstate.state('.hgtags') == '?':
259 self.add(['.hgtags'])
260 274
261 self.commit(['.hgtags'], message, user, date)
262 self.hook('tag', node=hex(node), tag=name, local=local)
275 self._tag(name, node, message, local, user, date)
263 276
264 277 def tags(self):
265 278 '''return a mapping of tag to node'''
266 279 if not self.tagscache:
267 280 self.tagscache = {}
268 281
269 282 def parsetag(line, context):
270 283 if not line:
271 284 return
272 285 s = l.split(" ", 1)
273 286 if len(s) != 2:
274 287 self.ui.warn(_("%s: cannot parse entry\n") % context)
275 288 return
276 289 node, key = s
277 290 key = util.tolocal(key.strip()) # stored in UTF-8
278 291 try:
279 292 bin_n = bin(node)
280 293 except TypeError:
281 294 self.ui.warn(_("%s: node '%s' is not well formed\n") %
282 295 (context, node))
283 296 return
284 297 if bin_n not in self.changelog.nodemap:
285 298 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
286 299 (context, key))
287 300 return
288 301 self.tagscache[key] = bin_n
289 302
290 303 # read the tags file from each head, ending with the tip,
291 304 # and add each tag found to the map, with "newer" ones
292 305 # taking precedence
293 306 f = None
294 307 for rev, node, fnode in self._hgtagsnodes():
295 308 f = (f and f.filectx(fnode) or
296 309 self.filectx('.hgtags', fileid=fnode))
297 310 count = 0
298 311 for l in f.data().splitlines():
299 312 count += 1
300 313 parsetag(l, _("%s, line %d") % (str(f), count))
301 314
302 315 try:
303 316 f = self.opener("localtags")
304 317 count = 0
305 318 for l in f:
306 319 # localtags are stored in the local character set
307 320 # while the internal tag table is stored in UTF-8
308 321 l = util.fromlocal(l)
309 322 count += 1
310 323 parsetag(l, _("localtags, line %d") % count)
311 324 except IOError:
312 325 pass
313 326
314 327 self.tagscache['tip'] = self.changelog.tip()
315 328
316 329 return self.tagscache
317 330
    def _hgtagsnodes(self):
        """Return [(rev, node, fnode)] for each head whose .hgtags file
        must be read, oldest first, with duplicate .hgtags file nodes
        collapsed so each distinct version is read only once."""
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags version seen earlier: drop the older entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
335 348
336 349 def tagslist(self):
337 350 '''return a list of tags ordered by revision'''
338 351 l = []
339 352 for t, n in self.tags().items():
340 353 try:
341 354 r = self.changelog.rev(n)
342 355 except:
343 356 r = -2 # sort to the beginning of the list if unknown
344 357 l.append((r, t, n))
345 358 l.sort()
346 359 return [(t, n) for r, t, n in l]
347 360
348 361 def nodetags(self, node):
349 362 '''return the tags associated with a node'''
350 363 if not self.nodetagscache:
351 364 self.nodetagscache = {}
352 365 for t, n in self.tags().items():
353 366 self.nodetagscache.setdefault(n, []).append(t)
354 367 return self.nodetagscache.get(node, [])
355 368
    def _branchtags(self):
        """Return the branch->head-node map (UTF-8 keys), refreshing and
        rewriting the on-disk cache if it is behind the current tip."""
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is stale: scan the missing revisions and persist
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
365 378
366 379 def branchtags(self):
367 380 if self.branchcache is not None:
368 381 return self.branchcache
369 382
370 383 self.branchcache = {} # avoid recursion in changectx
371 384 partial = self._branchtags()
372 385
373 386 # the branch cache is stored on disk as UTF-8, but in the local
374 387 # charset internally
375 388 for k, v in partial.items():
376 389 self.branchcache[util.tolocal(k)] = v
377 390 return self.branchcache
378 391
    def _readbranchcache(self):
        """Read branches.cache from disk.

        Returns (partial, last, lrev): the branch->node map, and the tip
        node/rev the cache was valid for.  Any parse or consistency error
        falls back to an empty cache ({}, nullid, nullrev).
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line records the tip the cache was written against
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any failure (missing/corrupt file) means "no usable cache"
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
402 415
403 416 def _writebranchcache(self, branches, tip, tiprev):
404 417 try:
405 418 f = self.opener("branches.cache", "w")
406 419 f.write("%s %s\n" % (hex(tip), tiprev))
407 420 for label, node in branches.iteritems():
408 421 f.write("%s %s\n" % (hex(node), label))
409 422 except IOError:
410 423 pass
411 424
412 425 def _updatebranchcache(self, partial, start, end):
413 426 for r in xrange(start, end):
414 427 c = self.changectx(r)
415 428 b = c.branch()
416 429 if b:
417 430 partial[b] = c.node()
418 431
    def lookup(self, key):
        """Resolve key to a changelog node.

        Tries, in order: the special names '.' (first dirstate parent)
        and 'null', an exact changelog match, tag names, branch names,
        and finally a unique prefix match.  Raises repo.RepoError if
        nothing matches.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
437 450
438 451 def dev(self):
439 452 return os.lstat(self.path).st_dev
440 453
    def local(self):
        # a localrepository is, by definition, local
        return True
443 456
    def join(self, f):
        """Return the path of f under the .hg directory."""
        return os.path.join(self.path, f)
446 459
447 460 def sjoin(self, f):
448 461 f = self.encodefn(f)
449 462 return os.path.join(self.spath, f)
450 463
    def wjoin(self, f):
        """Return the path of f under the working directory root."""
        return os.path.join(self.root, f)
453 466
    def file(self, f):
        """Return the filelog for tracked file f (a leading '/' is
        stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)
458 471
    def changectx(self, changeid=None):
        """Return a changectx for the given changeset identifier."""
        return context.changectx(self, changeid)
461 474
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
464 477
465 478 def parents(self, changeid=None):
466 479 '''
467 480 get list of changectxs for parents of changeid or working directory
468 481 '''
469 482 if changeid is None:
470 483 pl = self.dirstate.parents()
471 484 else:
472 485 n = self.changelog.lookup(changeid)
473 486 pl = self.changelog.parents(n)
474 487 if pl[1] == nullid:
475 488 return [self.changectx(pl[0])]
476 489 return [self.changectx(pl[0]), self.changectx(pl[1])]
477 490
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
482 495
    def getcwd(self):
        """Return the current working directory as reported by dirstate."""
        return self.dirstate.getcwd()
485 498
    def wfile(self, f, mode='r'):
        """Open working-directory file f with the given mode."""
        return self.wopener(f, mode)
488 501
489 502 def _filter(self, filter, filename, data):
490 503 if filter not in self.filterpats:
491 504 l = []
492 505 for pat, cmd in self.ui.configitems(filter):
493 506 mf = util.matcher(self.root, "", [pat], [], [])[1]
494 507 l.append((mf, cmd))
495 508 self.filterpats[filter] = l
496 509
497 510 for mf, cmd in self.filterpats[filter]:
498 511 if mf(filename):
499 512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
500 513 data = util.filter(data, cmd)
501 514 break
502 515
503 516 return data
504 517
505 518 def wread(self, filename):
506 519 if self._link(filename):
507 520 data = os.readlink(self.wjoin(filename))
508 521 else:
509 522 data = self.wopener(filename, 'r').read()
510 523 return self._filter("encode", filename, data)
511 524
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying the
        configured decode filters first.

        flags: "l" writes a symlink with data as the target; "x" sets
        the executable bit on a regular file.
        """
        data = self._filter("decode", filename, data)
        if "l" in flags:
            try:
                # replace any existing file before creating the symlink
                os.unlink(self.wjoin(filename))
            except OSError:
                pass
            os.symlink(data, self.wjoin(filename))
        else:
            try:
                # if the file currently is a symlink, remove it first so
                # we write a regular file instead of through the link
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
528 541
    def wwritedata(self, filename, data):
        """Return data as it would be written to the working directory
        (decode filters applied), without writing anything."""
        return self._filter("decode", filename, data)
531 544
    def transaction(self):
        """Return a new transaction, or a nested handle if one is
        already running.  Saves the dirstate and arranges for journal
        files to become undo files on close (for rollback)."""
        tr = self.transhandle
        if tr != None and tr.running():
            # nest inside the already-running transaction
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # on successful close, journal files are renamed to undo files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
551 564
    def recover(self):
        """Roll back an interrupted transaction, if its journal exists.
        Returns True if a rollback was performed, False otherwise."""
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # caches refer to pre-rollback state; reload them
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
562 575
    def rollback(self, wlock=None):
        """Undo the last committed transaction using the undo files,
        restoring the saved dirstate as well."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # both store caches and the dirstate are now stale
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
575 588
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
578 591
    def reload(self):
        """Reload the changelog and manifest from disk and drop the tag
        caches derived from them."""
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
584 597
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file `lockname`.

        If it is already held and wait is false, the LockHeld exception
        propagates; otherwise we warn and retry with a timeout from the
        ui.timeout config (default 600s).  acquirefn, if given, runs
        after the lock is obtained.  Returns the lock object.
        """
        try:
            # first attempt: non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
600 613
    def lock(self, wait=1):
        """Acquire the store lock; reloads store caches on acquisition."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
604 617
    def wlock(self, wait=1):
        """Acquire the working-directory lock; writes the dirstate on
        release and re-reads it on acquisition."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
609 622
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Records copy/rename metadata from the dirstate, collapses
        redundant parents, and returns the new (or existing, if
        unmodified) filelog node.  Appends fn to changelist when a new
        revision is actually created.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
669 682
670 683 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
671 684 if p1 is None:
672 685 p1, p2 = self.dirstate.parents()
673 686 return self.commit(files=files, text=text, user=user, date=date,
674 687 p1=p1, p2=p2, wlock=wlock, extra=extra)
675 688
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset.

        If p1 is None (the normal, dirstate-driven case), the file list
        comes from `files` (checked against the dirstate) or, when empty,
        from status(); otherwise (rawcommit path) `files` is committed
        as-is against the given parents.  Opens an editor when no text is
        supplied or force_editor is set.  Returns the new changeset node,
        or None if nothing changed / the commit message is empty.
        NOTE(review): `extra={}` is a mutable default; it is copied
        immediately below, so it is read-only safe, but fragile.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if we are committing onto its parent
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # no files, no branch change, no forced commit: nothing to do
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit tolerates unreadable files as removals
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and leading
        # blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
824 837
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # safe: we break immediately after mutating fdict
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # no node: walk the working directory via the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
858 871
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        NOTE(review): `files=[]` is a mutable default; it is only read
        here, never mutated, so it is safe but fragile.
        """

        def fcmp(fn, mf):
            # true if working-dir contents of fn differ from manifest mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match()
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    # best-effort lock so we may update the dirstate below
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the file as clean in the dirstate
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    # consume fn: anything left in mf1 afterwards was removed
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
957 970
958 971 def add(self, list, wlock=None):
959 972 if not wlock:
960 973 wlock = self.wlock()
961 974 for f in list:
962 975 p = self.wjoin(f)
963 976 islink = os.path.islink(p)
964 977 if not islink and not os.path.exists(p):
965 978 self.ui.warn(_("%s does not exist!\n") % f)
966 979 elif not islink and not os.path.isfile(p):
967 980 self.ui.warn(_("%s not added: only files and symlinks "
968 981 "supported currently\n") % f)
969 982 elif self.dirstate.state(f) in 'an':
970 983 self.ui.warn(_("%s already tracked!\n") % f)
971 984 else:
972 985 self.dirstate.update([f], "a")
973 986
974 987 def forget(self, list, wlock=None):
975 988 if not wlock:
976 989 wlock = self.wlock()
977 990 for f in list:
978 991 if self.dirstate.state(f) not in 'ai':
979 992 self.ui.warn(_("%s not added!\n") % f)
980 993 else:
981 994 self.dirstate.forget([f])
982 995
    def remove(self, list, unlink=False, wlock=None):
        """Schedule the given files for removal from the repository.

        If unlink is true, delete them from the working directory
        first.  Files still present in the working directory, merely
        added, or untracked are skipped with a warning.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is a real error
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # refuse to mark a file removed while it still exists
                # in the working directory
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just forget the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                # mark as removed ('r') in the dirstate
                self.dirstate.update([f], "r")
1003 1016
1004 1017 def undelete(self, list, wlock=None):
1005 1018 p = self.dirstate.parents()[0]
1006 1019 mn = self.changelog.read(p)[0]
1007 1020 m = self.manifest.read(mn)
1008 1021 if not wlock:
1009 1022 wlock = self.wlock()
1010 1023 for f in list:
1011 1024 if self.dirstate.state(f) not in "r":
1012 1025 self.ui.warn("%s not removed!\n" % f)
1013 1026 else:
1014 1027 t = self.file(f).read(m[f])
1015 1028 self.wwrite(f, t, m.flags(f))
1016 1029 self.dirstate.update([f], "n")
1017 1030
1018 1031 def copy(self, source, dest, wlock=None):
1019 1032 p = self.wjoin(dest)
1020 1033 if not os.path.exists(p):
1021 1034 self.ui.warn(_("%s does not exist!\n") % dest)
1022 1035 elif not os.path.isfile(p):
1023 1036 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1024 1037 else:
1025 1038 if not wlock:
1026 1039 wlock = self.wlock()
1027 1040 if self.dirstate.state(dest) == '?':
1028 1041 self.dirstate.update([dest], "a")
1029 1042 self.dirstate.copy(source, dest)
1030 1043
1031 1044 def heads(self, start=None):
1032 1045 heads = self.changelog.heads(start)
1033 1046 # sort the output in rev descending order
1034 1047 heads = [(-self.changelog.rev(h), h) for h in heads]
1035 1048 heads.sort()
1036 1049 return [n for (r, n) in heads]
1037 1050
1038 1051 def branches(self, nodes):
1039 1052 if not nodes:
1040 1053 nodes = [self.changelog.tip()]
1041 1054 b = []
1042 1055 for n in nodes:
1043 1056 t = n
1044 1057 while 1:
1045 1058 p = self.changelog.parents(n)
1046 1059 if p[1] != nullid or p[0] == nullid:
1047 1060 b.append((t, n, p[0], p[1]))
1048 1061 break
1049 1062 n = p[0]
1050 1063 return b
1051 1064
1052 1065 def between(self, pairs):
1053 1066 r = []
1054 1067
1055 1068 for top, bottom in pairs:
1056 1069 n, l, i = top, [], 0
1057 1070 f = 1
1058 1071
1059 1072 while n != bottom:
1060 1073 p = self.changelog.parents(n)[0]
1061 1074 if i == f:
1062 1075 l.append(n)
1063 1076 f = f * 2
1064 1077 n = p
1065 1078 i += 1
1066 1079
1067 1080 r.append(l)
1068 1081
1069 1082 return r
1070 1083
1071 1084 def findincoming(self, remote, base=None, heads=None, force=False):
1072 1085 """Return list of roots of the subsets of missing nodes from remote
1073 1086
1074 1087 If base dict is specified, assume that these nodes and their parents
1075 1088 exist on the remote side and that no child of a node of base exists
1076 1089 in both remote and self.
1077 1090 Furthermore base will be updated to include the nodes that exists
1078 1091 in self and remote but no children exists in self and remote.
1079 1092 If a list of heads is specified, return only nodes which are heads
1080 1093 or ancestors of these heads.
1081 1094
1082 1095 All the ancestors of base are in self and in remote.
1083 1096 All the descendants of the list returned are missing in self.
1084 1097 (and so we know that the rest of the nodes are missing in remote, see
1085 1098 outgoing)
1086 1099 """
1087 1100 m = self.changelog.nodemap
1088 1101 search = []
1089 1102 fetch = {}
1090 1103 seen = {}
1091 1104 seenbranch = {}
1092 1105 if base == None:
1093 1106 base = {}
1094 1107
1095 1108 if not heads:
1096 1109 heads = remote.heads()
1097 1110
1098 1111 if self.changelog.tip() == nullid:
1099 1112 base[nullid] = 1
1100 1113 if heads != [nullid]:
1101 1114 return [nullid]
1102 1115 return []
1103 1116
1104 1117 # assume we're closer to the tip than the root
1105 1118 # and start by examining the heads
1106 1119 self.ui.status(_("searching for changes\n"))
1107 1120
1108 1121 unknown = []
1109 1122 for h in heads:
1110 1123 if h not in m:
1111 1124 unknown.append(h)
1112 1125 else:
1113 1126 base[h] = 1
1114 1127
1115 1128 if not unknown:
1116 1129 return []
1117 1130
1118 1131 req = dict.fromkeys(unknown)
1119 1132 reqcnt = 0
1120 1133
1121 1134 # search through remote branches
1122 1135 # a 'branch' here is a linear segment of history, with four parts:
1123 1136 # head, root, first parent, second parent
1124 1137 # (a branch always has two parents (or none) by definition)
1125 1138 unknown = remote.branches(unknown)
1126 1139 while unknown:
1127 1140 r = []
1128 1141 while unknown:
1129 1142 n = unknown.pop(0)
1130 1143 if n[0] in seen:
1131 1144 continue
1132 1145
1133 1146 self.ui.debug(_("examining %s:%s\n")
1134 1147 % (short(n[0]), short(n[1])))
1135 1148 if n[0] == nullid: # found the end of the branch
1136 1149 pass
1137 1150 elif n in seenbranch:
1138 1151 self.ui.debug(_("branch already found\n"))
1139 1152 continue
1140 1153 elif n[1] and n[1] in m: # do we know the base?
1141 1154 self.ui.debug(_("found incomplete branch %s:%s\n")
1142 1155 % (short(n[0]), short(n[1])))
1143 1156 search.append(n) # schedule branch range for scanning
1144 1157 seenbranch[n] = 1
1145 1158 else:
1146 1159 if n[1] not in seen and n[1] not in fetch:
1147 1160 if n[2] in m and n[3] in m:
1148 1161 self.ui.debug(_("found new changeset %s\n") %
1149 1162 short(n[1]))
1150 1163 fetch[n[1]] = 1 # earliest unknown
1151 1164 for p in n[2:4]:
1152 1165 if p in m:
1153 1166 base[p] = 1 # latest known
1154 1167
1155 1168 for p in n[2:4]:
1156 1169 if p not in req and p not in m:
1157 1170 r.append(p)
1158 1171 req[p] = 1
1159 1172 seen[n[0]] = 1
1160 1173
1161 1174 if r:
1162 1175 reqcnt += 1
1163 1176 self.ui.debug(_("request %d: %s\n") %
1164 1177 (reqcnt, " ".join(map(short, r))))
1165 1178 for p in xrange(0, len(r), 10):
1166 1179 for b in remote.branches(r[p:p+10]):
1167 1180 self.ui.debug(_("received %s:%s\n") %
1168 1181 (short(b[0]), short(b[1])))
1169 1182 unknown.append(b)
1170 1183
1171 1184 # do binary search on the branches we found
1172 1185 while search:
1173 1186 n = search.pop(0)
1174 1187 reqcnt += 1
1175 1188 l = remote.between([(n[0], n[1])])[0]
1176 1189 l.append(n[1])
1177 1190 p = n[0]
1178 1191 f = 1
1179 1192 for i in l:
1180 1193 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1181 1194 if i in m:
1182 1195 if f <= 2:
1183 1196 self.ui.debug(_("found new branch changeset %s\n") %
1184 1197 short(p))
1185 1198 fetch[p] = 1
1186 1199 base[i] = 1
1187 1200 else:
1188 1201 self.ui.debug(_("narrowed branch search to %s:%s\n")
1189 1202 % (short(p), short(i)))
1190 1203 search.append((p, i))
1191 1204 break
1192 1205 p, f = i, f * 2
1193 1206
1194 1207 # sanity check our fetch list
1195 1208 for f in fetch.keys():
1196 1209 if f in m:
1197 1210 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1198 1211
1199 1212 if base.keys() == [nullid]:
1200 1213 if force:
1201 1214 self.ui.warn(_("warning: repository is unrelated\n"))
1202 1215 else:
1203 1216 raise util.Abort(_("repository is unrelated"))
1204 1217
1205 1218 self.ui.debug(_("found new changesets starting at ") +
1206 1219 " ".join([short(f) for f in fetch]) + "\n")
1207 1220
1208 1221 self.ui.debug(_("%d total queries\n") % reqcnt)
1209 1222
1210 1223 return fetch.keys()
1211 1224
1212 1225 def findoutgoing(self, remote, base=None, heads=None, force=False):
1213 1226 """Return list of nodes that are roots of subsets not in remote
1214 1227
1215 1228 If base dict is specified, assume that these nodes and their parents
1216 1229 exist on the remote side.
1217 1230 If a list of heads is specified, return only nodes which are heads
1218 1231 or ancestors of these heads, and return a second element which
1219 1232 contains all remote heads which get new children.
1220 1233 """
1221 1234 if base == None:
1222 1235 base = {}
1223 1236 self.findincoming(remote, base, heads, force=force)
1224 1237
1225 1238 self.ui.debug(_("common changesets up to ")
1226 1239 + " ".join(map(short, base.keys())) + "\n")
1227 1240
1228 1241 remain = dict.fromkeys(self.changelog.nodemap)
1229 1242
1230 1243 # prune everything remote has from the tree
1231 1244 del remain[nullid]
1232 1245 remove = base.keys()
1233 1246 while remove:
1234 1247 n = remove.pop(0)
1235 1248 if n in remain:
1236 1249 del remain[n]
1237 1250 for p in self.changelog.parents(n):
1238 1251 remove.append(p)
1239 1252
1240 1253 # find every node whose parents have been pruned
1241 1254 subset = []
1242 1255 # find every remote head that will get new children
1243 1256 updated_heads = {}
1244 1257 for n in remain:
1245 1258 p1, p2 = self.changelog.parents(n)
1246 1259 if p1 not in remain and p2 not in remain:
1247 1260 subset.append(n)
1248 1261 if heads:
1249 1262 if p1 in heads:
1250 1263 updated_heads[p1] = True
1251 1264 if p2 in heads:
1252 1265 updated_heads[p2] = True
1253 1266
1254 1267 # this is the set of all roots we have to push
1255 1268 if heads:
1256 1269 return subset, updated_heads.keys()
1257 1270 else:
1258 1271 return subset
1259 1272
1260 1273 def pull(self, remote, heads=None, force=False, lock=None):
1261 1274 mylock = False
1262 1275 if not lock:
1263 1276 lock = self.lock()
1264 1277 mylock = True
1265 1278
1266 1279 try:
1267 1280 fetch = self.findincoming(remote, force=force)
1268 1281 if fetch == [nullid]:
1269 1282 self.ui.status(_("requesting all changes\n"))
1270 1283
1271 1284 if not fetch:
1272 1285 self.ui.status(_("no changes found\n"))
1273 1286 return 0
1274 1287
1275 1288 if heads is None:
1276 1289 cg = remote.changegroup(fetch, 'pull')
1277 1290 else:
1278 1291 if 'changegroupsubset' not in remote.capabilities:
1279 1292 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1280 1293 cg = remote.changegroupsubset(fetch, heads, 'pull')
1281 1294 return self.addchangegroup(cg, 'pull', remote.url())
1282 1295 finally:
1283 1296 if mylock:
1284 1297 lock.release()
1285 1298
1286 1299 def push(self, remote, force=False, revs=None):
1287 1300 # there are two ways to push to remote repo:
1288 1301 #
1289 1302 # addchangegroup assumes local user can lock remote
1290 1303 # repo (local filesystem, old ssh servers).
1291 1304 #
1292 1305 # unbundle assumes local user cannot lock remote repo (new ssh
1293 1306 # servers, http servers).
1294 1307
1295 1308 if remote.capable('unbundle'):
1296 1309 return self.push_unbundle(remote, force, revs)
1297 1310 return self.push_addchangegroup(remote, force, revs)
1298 1311
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push, or when the push would create
        new remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repo is empty; nothing can be a *new* head
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head supersedes this remote
                            # head, so it stays a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally: still a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1354 1367
1355 1368 def push_addchangegroup(self, remote, force, revs):
1356 1369 lock = remote.lock()
1357 1370
1358 1371 ret = self.prepush(remote, force, revs)
1359 1372 if ret[0] is not None:
1360 1373 cg, remote_heads = ret
1361 1374 return remote.addchangegroup(cg, 'push', self.url())
1362 1375 return ret[1]
1363 1376
1364 1377 def push_unbundle(self, remote, force, revs):
1365 1378 # local repo finds heads on server, finds out what revs it
1366 1379 # must push. once revs transferred, if server finds it has
1367 1380 # different heads (someone else won commit/push race), server
1368 1381 # aborts.
1369 1382
1370 1383 ret = self.prepush(remote, force, revs)
1371 1384 if ret[0] is not None:
1372 1385 cg, remote_heads = ret
1373 1386 if force: remote_heads = ['force']
1374 1387 return remote.unbundle(cg, remote_heads, 'push')
1375 1388 return ret[1]
1376 1389
1377 1390 def changegroupinfo(self, nodes):
1378 1391 self.ui.note(_("%d changesets found\n") % len(nodes))
1379 1392 if self.ui.debugflag:
1380 1393 self.ui.debug(_("List of changesets:\n"))
1381 1394 for node in nodes:
1382 1395 self.ui.debug("%s\n" % hex(node))
1383 1396
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases and heads are lists of changelog nodes; source is an opaque
        tag passed through to the preoutgoing/outgoing hooks.  Returns a
        util.chunkbuffer wrapping the changegroup chunk generator.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1655 1668
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes is the list of changelog nodes the recipient is assumed
        to already have; source is an opaque tag passed through to the
        preoutgoing/outgoing hooks.  Returns a util.chunkbuffer of
        changegroup chunks.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # revision numbers of all outgoing changesets, for fast membership
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # a changenode is its own changegroup link
            return x

        def gennodelst(revlog):
            # yield the nodes of revlog whose linked changeset is outgoing
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # side-effect callback: record every file touched by an
            # outgoing changeset while the changelog group is generated
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a node of revlog back to its owning changelog node
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1722 1735
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source is a stream of chunked changegroup data; srctype and url
        describe where it came from and are passed through to hooks.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # Linkrev callback for the changelog: each incoming changeset is
        # linked to the revision number it is about to be assigned
        # (cl.count() is the index the next added revision will get).
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # Linkrev callback for manifests and filelogs: map a changeset
        # node to its (already added) changelog revision number.
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        # A failing prechangegroup hook aborts before anything is written.
        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        # Counters reported to the user at the end.
        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last changelog revision before/after the add, so
            # revisions cor+1 .. cnr are the newly received changesets.
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            # The file section is a sequence of (filename chunk, revision
            # group) pairs terminated by an empty chunk.
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # Flush the appended changelog data only after every group was
            # applied successfully; presumably this is what makes the new
            # changesets visible to readers -- TODO confirm in appendfile.
            cl.writedata()
        finally:
            # Always clean up the appendfile temp state, even on abort.
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still veto the whole transaction.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # Post-commit notification hooks: one changegroup hook for the
            # batch, one incoming hook per new changeset.
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1830 1843
1831 1844 def stream_in(self, remote):
1832 1845 fp = remote.stream_out()
1833 1846 l = fp.readline()
1834 1847 try:
1835 1848 resp = int(l)
1836 1849 except ValueError:
1837 1850 raise util.UnexpectedOutput(
1838 1851 _('Unexpected response from remote server:'), l)
1839 1852 if resp == 1:
1840 1853 raise util.Abort(_('operation forbidden by server'))
1841 1854 elif resp == 2:
1842 1855 raise util.Abort(_('locking the remote repository failed'))
1843 1856 elif resp != 0:
1844 1857 raise util.Abort(_('the server sent an unknown error code'))
1845 1858 self.ui.status(_('streaming all changes\n'))
1846 1859 l = fp.readline()
1847 1860 try:
1848 1861 total_files, total_bytes = map(int, l.split(' ', 1))
1849 1862 except ValueError, TypeError:
1850 1863 raise util.UnexpectedOutput(
1851 1864 _('Unexpected response from remote server:'), l)
1852 1865 self.ui.status(_('%d files to transfer, %s of data\n') %
1853 1866 (total_files, util.bytecount(total_bytes)))
1854 1867 start = time.time()
1855 1868 for i in xrange(total_files):
1856 1869 # XXX doesn't support '\n' or '\r' in filenames
1857 1870 l = fp.readline()
1858 1871 try:
1859 1872 name, size = l.split('\0', 1)
1860 1873 size = int(size)
1861 1874 except ValueError, TypeError:
1862 1875 raise util.UnexpectedOutput(
1863 1876 _('Unexpected response from remote server:'), l)
1864 1877 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1865 1878 ofp = self.sopener(name, 'w')
1866 1879 for chunk in util.filechunkiter(fp, limit=size):
1867 1880 ofp.write(chunk)
1868 1881 ofp.close()
1869 1882 elapsed = time.time() - start
1870 1883 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1871 1884 (util.bytecount(total_bytes), elapsed,
1872 1885 util.bytecount(total_bytes / elapsed)))
1873 1886 self.reload()
1874 1887 return len(self.heads()) + 1
1875 1888
1876 1889 def clone(self, remote, heads=[], stream=False):
1877 1890 '''clone remote repository.
1878 1891
1879 1892 keyword arguments:
1880 1893 heads: list of revs to clone (forces use of pull)
1881 1894 stream: use streaming clone if possible'''
1882 1895
1883 1896 # now, all clients that can request uncompressed clones can
1884 1897 # read repo formats supported by all servers that can serve
1885 1898 # them.
1886 1899
1887 1900 # if revlog format changes, client will have to check version
1888 1901 # and format flags on "stream" capability, and use
1889 1902 # uncompressed only if compatible.
1890 1903
1891 1904 if stream and not heads and remote.capable('stream'):
1892 1905 return self.stream_in(remote)
1893 1906 return self.pull(remote, heads)
1894 1907
1895 1908 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    A snapshot of *files* is taken immediately, so later mutation of the
    caller's list does not change what the callback renames.  The closure
    captures only that snapshot, which avoids circular references and
    keeps destructors working.
    """
    pending = [tuple(pair) for pair in files]
    def run_pending_renames():
        for source, destination in pending:
            util.rename(source, destination)
    return run_pending_renames
1902 1915
def instance(ui, path, create):
    """Open (or create) a localrepository at *path*, stripping any
    leading 'file:' scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1905 1918
def islocal(path):
    """Repositories of this class are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now