small fixes for the parent patch...
Alexis S. L. Carvalho
r4166:c0271aba default
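The hunk below contains the actual fixes. The old code read the usestore setting with ui.config(), which returns the raw hgrc string (so a user's "usestore = false" is still truthy), and it built the requirements as ("revlogv1"), which is a plain string rather than a one-element tuple, so the requires file would have been written one character per line. The new code uses ui.configbool() and builds requirements as a list, appending "store" only when the store layout is enabled; it also creates the dummy 00changelog.i before writing the requires file. A minimal sketch of the string/tuple pitfall (illustrative, not part of the patch):

    reqs = ("revlogv1")       # missing comma: a string, not a tuple
    for r in reqs:
        print r               # prints one *character* per line: r, e, v, ...

    reqs = ["revlogv1"]       # the fix: a real sequence of requirement names
    for r in reqs:
        print r               # prints: revlogv1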
@@ -1,1933 +1,1932
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, appendfile, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 if not path:
24 24 p = os.getcwd()
25 25 while not os.path.isdir(os.path.join(p, ".hg")):
26 26 oldp = p
27 27 p = os.path.dirname(p)
28 28 if p == oldp:
29 29 raise repo.RepoError(_("There is no Mercurial repository"
30 30 " here (.hg not found)"))
31 31 path = p
32 32
33 33 self.path = os.path.join(path, ".hg")
34 34 self.root = os.path.realpath(path)
35 35 self.origroot = path
36 36 self.opener = util.opener(self.path)
37 37 self.wopener = util.opener(self.root)
38 38
39 39 if not os.path.isdir(self.path):
40 40 if create:
41 41 if not os.path.exists(path):
42 42 os.mkdir(path)
43 43 os.mkdir(self.path)
44 if parentui.config('format', 'usestore', 1):
44 requirements = ["revlogv1"]
45 if parentui.configbool('format', 'usestore', True):
45 46 os.mkdir(os.path.join(self.path, "store"))
46 requirements = ("revlogv1", "store")
47 else:
48 requirements = ("revlogv1")
47 requirements.append("store")
48 # create an invalid changelog
49 self.opener("00changelog.i", "a").write(
50 '\0\0\0\2' # represents revlogv2
51 ' dummy changelog to prevent using the old repo layout'
52 )
49 53 reqfile = self.opener("requires", "w")
50 54 for r in requirements:
51 55 reqfile.write("%s\n" % r)
52 56 reqfile.close()
53 # create an invalid changelog
54 self.opener("00changelog.i", "a").write(
55 '\0\0\0\2' # represents revlogv2
56 ' dummy changelog to prevent using the old repo layout'
57 )
58 57 else:
59 58 raise repo.RepoError(_("repository %s not found") % path)
60 59 elif create:
61 60 raise repo.RepoError(_("repository %s already exists") % path)
62 61 else:
63 62 # find requirements
64 63 try:
65 64 requirements = self.opener("requires").read().splitlines()
66 65 except IOError, inst:
67 66 if inst.errno != errno.ENOENT:
68 67 raise
69 68 requirements = []
70 69 # check them
71 70 for r in requirements:
72 71 if r not in self.supported:
73 72 raise repo.RepoError(_("requirement '%s' not supported") % r)
74 73
75 74 # setup store
76 75 if "store" in requirements:
77 76 self.encodefn = util.encodefilename
78 77 self.decodefn = util.decodefilename
79 78 self.spath = os.path.join(self.path, "store")
80 79 else:
81 80 self.encodefn = lambda x: x
82 81 self.decodefn = lambda x: x
83 82 self.spath = self.path
84 83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
85 84
86 85 self.ui = ui.ui(parentui=parentui)
87 86 try:
88 87 self.ui.readconfig(self.join("hgrc"), self.root)
89 88 except IOError:
90 89 pass
91 90
92 91 v = self.ui.configrevlog()
93 92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
94 93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
95 94 fl = v.get('flags', None)
96 95 flags = 0
97 96 if fl != None:
98 97 for x in fl.split():
99 98 flags |= revlog.flagstr(x)
100 99 elif self.revlogv1:
101 100 flags = revlog.REVLOG_DEFAULT_FLAGS
102 101
103 102 v = self.revlogversion | flags
104 103 self.manifest = manifest.manifest(self.sopener, v)
105 104 self.changelog = changelog.changelog(self.sopener, v)
106 105
107 106 fallback = self.ui.config('ui', 'fallbackencoding')
108 107 if fallback:
109 108 util._fallbackencoding = fallback
110 109
111 110 # the changelog might not have the inline index flag
112 111 # on. If the format of the changelog is the same as found in
113 112 # .hgrc, apply any flags found in the .hgrc as well.
114 113 # Otherwise, just use the version from the changelog
115 114 v = self.changelog.version
116 115 if v == self.revlogversion:
117 116 v |= flags
118 117 self.revlogversion = v
119 118
120 119 self.tagscache = None
121 120 self.branchcache = None
122 121 self.nodetagscache = None
123 122 self.filterpats = {}
124 123 self.transhandle = None
125 124
126 125 self._link = lambda x: False
127 126 if util.checklink(self.root):
128 127 r = self.root # avoid circular reference in lambda
129 128 self._link = lambda x: util.is_link(os.path.join(r, x))
130 129
131 130 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
132 131
133 132 def url(self):
134 133 return 'file:' + self.root
135 134
136 135 def hook(self, name, throw=False, **args):
137 136 def callhook(hname, funcname):
138 137 '''call python hook. hook is callable object, looked up as
139 138 name in python module. if callable returns "true", hook
140 139 fails, else passes. if hook raises exception, treated as
141 140 hook failure. exception propagates if throw is "true".
142 141
143 142 reason for "true" meaning "hook failed" is so that
144 143 unmodified commands (e.g. mercurial.commands.update) can
145 144 be run as hooks without wrappers to convert return values.'''
146 145
147 146 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
148 147 obj = funcname
149 148 if not callable(obj):
150 149 d = funcname.rfind('.')
151 150 if d == -1:
152 151 raise util.Abort(_('%s hook is invalid ("%s" not in '
153 152 'a module)') % (hname, funcname))
154 153 modname = funcname[:d]
155 154 try:
156 155 obj = __import__(modname)
157 156 except ImportError:
158 157 try:
159 158 # extensions are loaded with hgext_ prefix
160 159 obj = __import__("hgext_%s" % modname)
161 160 except ImportError:
162 161 raise util.Abort(_('%s hook is invalid '
163 162 '(import of "%s" failed)') %
164 163 (hname, modname))
165 164 try:
166 165 for p in funcname.split('.')[1:]:
167 166 obj = getattr(obj, p)
168 167 except AttributeError, err:
169 168 raise util.Abort(_('%s hook is invalid '
170 169 '("%s" is not defined)') %
171 170 (hname, funcname))
172 171 if not callable(obj):
173 172 raise util.Abort(_('%s hook is invalid '
174 173 '("%s" is not callable)') %
175 174 (hname, funcname))
176 175 try:
177 176 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
178 177 except (KeyboardInterrupt, util.SignalInterrupt):
179 178 raise
180 179 except Exception, exc:
181 180 if isinstance(exc, util.Abort):
182 181 self.ui.warn(_('error: %s hook failed: %s\n') %
183 182 (hname, exc.args[0]))
184 183 else:
185 184 self.ui.warn(_('error: %s hook raised an exception: '
186 185 '%s\n') % (hname, exc))
187 186 if throw:
188 187 raise
189 188 self.ui.print_exc()
190 189 return True
191 190 if r:
192 191 if throw:
193 192 raise util.Abort(_('%s hook failed') % hname)
194 193 self.ui.warn(_('warning: %s hook failed\n') % hname)
195 194 return r
196 195
197 196 def runhook(name, cmd):
198 197 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
199 198 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
200 199 r = util.system(cmd, environ=env, cwd=self.root)
201 200 if r:
202 201 desc, r = util.explain_exit(r)
203 202 if throw:
204 203 raise util.Abort(_('%s hook %s') % (name, desc))
205 204 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
206 205 return r
207 206
208 207 r = False
209 208 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
210 209 if hname.split(".", 1)[0] == name and cmd]
211 210 hooks.sort()
212 211 for hname, cmd in hooks:
213 212 if callable(cmd):
214 213 r = callhook(hname, cmd) or r
215 214 elif cmd.startswith('python:'):
216 215 r = callhook(hname, cmd[7:].strip()) or r
217 216 else:
218 217 r = runhook(hname, cmd) or r
219 218 return r
220 219
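The dispatch above reads the [hooks] section of hgrc: every entry whose name (before an optional "." suffix) matches the hook being fired is run, callables and "python:" values through callhook() and anything else through runhook() as a shell command, with the keyword arguments exported as HG_* environment variables. A hypothetical hgrc sketch (module and command illustrative):

    [hooks]
    commit = echo committed $HG_NODE       # shell hook, via runhook()
    commit.notify = python:mymod.notify    # python hook, via callhook()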
221 220 tag_disallowed = ':\r\n'
222 221
223 222 def _tag(self, name, node, message, local, user, date, parent=None):
224 223 use_dirstate = parent is None
225 224
226 225 for c in self.tag_disallowed:
227 226 if c in name:
228 227 raise util.Abort(_('%r cannot be used in a tag name') % c)
229 228
230 229 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
231 230
232 231 if local:
233 232 # local tags are stored in the current charset
234 233 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
235 234 self.hook('tag', node=hex(node), tag=name, local=local)
236 235 return
237 236
238 237 # committed tags are stored in UTF-8
239 238 line = '%s %s\n' % (hex(node), util.fromlocal(name))
240 239 if use_dirstate:
241 240 self.wfile('.hgtags', 'ab').write(line)
242 241 else:
243 242 ntags = self.filectx('.hgtags', parent).data()
244 243 self.wfile('.hgtags', 'ab').write(ntags + line)
245 244 if use_dirstate and self.dirstate.state('.hgtags') == '?':
246 245 self.add(['.hgtags'])
247 246
248 247 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
249 248
250 249 self.hook('tag', node=hex(node), tag=name, local=local)
251 250
252 251 return tagnode
253 252
254 253 def tag(self, name, node, message, local, user, date):
255 254 '''tag a revision with a symbolic name.
256 255
257 256 if local is True, the tag is stored in a per-repository file.
258 257 otherwise, it is stored in the .hgtags file, and a new
259 258 changeset is committed with the change.
260 259
261 260 keyword arguments:
262 261
263 262 local: whether to store tag in non-version-controlled file
264 263 (default False)
265 264
266 265 message: commit message to use if committing
267 266
268 267 user: name of user to use if committing
269 268
270 269 date: date tuple to use if committing'''
271 270
272 271 for x in self.status()[:5]:
273 272 if '.hgtags' in x:
274 273 raise util.Abort(_('working copy of .hgtags is changed '
275 274 '(please commit .hgtags manually)'))
276 275
277 276
278 277 self._tag(name, node, message, local, user, date)
279 278
280 279 def tags(self):
281 280 '''return a mapping of tag to node'''
282 281 if not self.tagscache:
283 282 self.tagscache = {}
284 283
285 284 def parsetag(line, context):
286 285 if not line:
287 286 return
288 287 s = line.split(" ", 1)
289 288 if len(s) != 2:
290 289 self.ui.warn(_("%s: cannot parse entry\n") % context)
291 290 return
292 291 node, key = s
293 292 key = util.tolocal(key.strip()) # stored in UTF-8
294 293 try:
295 294 bin_n = bin(node)
296 295 except TypeError:
297 296 self.ui.warn(_("%s: node '%s' is not well formed\n") %
298 297 (context, node))
299 298 return
300 299 if bin_n not in self.changelog.nodemap:
301 300 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
302 301 (context, key))
303 302 return
304 303 self.tagscache[key] = bin_n
305 304
306 305 # read the tags file from each head, ending with the tip,
307 306 # and add each tag found to the map, with "newer" ones
308 307 # taking precedence
309 308 f = None
310 309 for rev, node, fnode in self._hgtagsnodes():
311 310 f = (f and f.filectx(fnode) or
312 311 self.filectx('.hgtags', fileid=fnode))
313 312 count = 0
314 313 for l in f.data().splitlines():
315 314 count += 1
316 315 parsetag(l, _("%s, line %d") % (str(f), count))
317 316
318 317 try:
319 318 f = self.opener("localtags")
320 319 count = 0
321 320 for l in f:
322 321 # localtags are stored in the local character set
323 322 # while the internal tag table is stored in UTF-8
324 323 l = util.fromlocal(l)
325 324 count += 1
326 325 parsetag(l, _("localtags, line %d") % count)
327 326 except IOError:
328 327 pass
329 328
330 329 self.tagscache['tip'] = self.changelog.tip()
331 330
332 331 return self.tagscache
333 332
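parsetag() above expects one entry per line: a 40-digit hex changeset node, a space, and the tag name (committed .hgtags entries are UTF-8, localtags entries are in the local charset). A sketch of the format with a hypothetical node:

    0123456789abcdef0123456789abcdef01234567 release-1.0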
334 333 def _hgtagsnodes(self):
335 334 heads = self.heads()
336 335 heads.reverse()
337 336 last = {}
338 337 ret = []
339 338 for node in heads:
340 339 c = self.changectx(node)
341 340 rev = c.rev()
342 341 try:
343 342 fnode = c.filenode('.hgtags')
344 343 except revlog.LookupError:
345 344 continue
346 345 ret.append((rev, node, fnode))
347 346 if fnode in last:
348 347 ret[last[fnode]] = None
349 348 last[fnode] = len(ret) - 1
350 349 return [item for item in ret if item]
351 350
352 351 def tagslist(self):
353 352 '''return a list of tags ordered by revision'''
354 353 l = []
355 354 for t, n in self.tags().items():
356 355 try:
357 356 r = self.changelog.rev(n)
358 357 except:
359 358 r = -2 # sort to the beginning of the list if unknown
360 359 l.append((r, t, n))
361 360 l.sort()
362 361 return [(t, n) for r, t, n in l]
363 362
364 363 def nodetags(self, node):
365 364 '''return the tags associated with a node'''
366 365 if not self.nodetagscache:
367 366 self.nodetagscache = {}
368 367 for t, n in self.tags().items():
369 368 self.nodetagscache.setdefault(n, []).append(t)
370 369 return self.nodetagscache.get(node, [])
371 370
372 371 def _branchtags(self):
373 372 partial, last, lrev = self._readbranchcache()
374 373
375 374 tiprev = self.changelog.count() - 1
376 375 if lrev != tiprev:
377 376 self._updatebranchcache(partial, lrev+1, tiprev+1)
378 377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
379 378
380 379 return partial
381 380
382 381 def branchtags(self):
383 382 if self.branchcache is not None:
384 383 return self.branchcache
385 384
386 385 self.branchcache = {} # avoid recursion in changectx
387 386 partial = self._branchtags()
388 387
389 388 # the branch cache is stored on disk as UTF-8, but in the local
390 389 # charset internally
391 390 for k, v in partial.items():
392 391 self.branchcache[util.tolocal(k)] = v
393 392 return self.branchcache
394 393
395 394 def _readbranchcache(self):
396 395 partial = {}
397 396 try:
398 397 f = self.opener("branches.cache")
399 398 lines = f.read().split('\n')
400 399 f.close()
401 400 last, lrev = lines.pop(0).rstrip().split(" ", 1)
402 401 last, lrev = bin(last), int(lrev)
403 402 if not (lrev < self.changelog.count() and
404 403 self.changelog.node(lrev) == last): # sanity check
405 404 # invalidate the cache
406 405 raise ValueError('Invalid branch cache: unknown tip')
407 406 for l in lines:
408 407 if not l: continue
409 408 node, label = l.rstrip().split(" ", 1)
410 409 partial[label] = bin(node)
411 410 except (KeyboardInterrupt, util.SignalInterrupt):
412 411 raise
413 412 except Exception, inst:
414 413 if self.ui.debugflag:
415 414 self.ui.warn(str(inst), '\n')
416 415 partial, last, lrev = {}, nullid, nullrev
417 416 return partial, last, lrev
418 417
419 418 def _writebranchcache(self, branches, tip, tiprev):
420 419 try:
421 420 f = self.opener("branches.cache", "w")
422 421 f.write("%s %s\n" % (hex(tip), tiprev))
423 422 for label, node in branches.iteritems():
424 423 f.write("%s %s\n" % (hex(node), label))
425 424 except IOError:
426 425 pass
427 426
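Together these two methods define the branches.cache format: a header line holding the tip node and tip revision (checked on read to invalidate stale caches), then one "node label" line per branch. A sketch with hypothetical values:

    0123456789abcdef0123456789abcdef01234567 4166
    89abcdef0123456789abcdef0123456789abcdef default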
428 427 def _updatebranchcache(self, partial, start, end):
429 428 for r in xrange(start, end):
430 429 c = self.changectx(r)
431 430 b = c.branch()
432 431 if b:
433 432 partial[b] = c.node()
434 433
435 434 def lookup(self, key):
436 435 if key == '.':
437 436 key = self.dirstate.parents()[0]
438 437 if key == nullid:
439 438 raise repo.RepoError(_("no revision checked out"))
440 439 elif key == 'null':
441 440 return nullid
442 441 n = self.changelog._match(key)
443 442 if n:
444 443 return n
445 444 if key in self.tags():
446 445 return self.tags()[key]
447 446 if key in self.branchtags():
448 447 return self.branchtags()[key]
449 448 n = self.changelog._partialmatch(key)
450 449 if n:
451 450 return n
452 451 raise repo.RepoError(_("unknown revision '%s'") % key)
453 452
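lookup() above tries each namespace in a fixed order: the '.' and 'null' aliases, an exact rev/node match in the changelog, tag names, branch names, and finally an unambiguous node-hex prefix; anything else raises RepoError. A usage sketch (keys illustrative):

    node = repo.lookup('tip')        # resolved through the tags table
    node = repo.lookup('default')    # resolved through branchtags()
    node = repo.lookup('c0271aba')   # resolved as a node-hex prefix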
454 453 def dev(self):
455 454 return os.lstat(self.path).st_dev
456 455
457 456 def local(self):
458 457 return True
459 458
460 459 def join(self, f):
461 460 return os.path.join(self.path, f)
462 461
463 462 def sjoin(self, f):
464 463 f = self.encodefn(f)
465 464 return os.path.join(self.spath, f)
466 465
467 466 def wjoin(self, f):
468 467 return os.path.join(self.root, f)
469 468
470 469 def file(self, f):
471 470 if f[0] == '/':
472 471 f = f[1:]
473 472 return filelog.filelog(self.sopener, f, self.revlogversion)
474 473
475 474 def changectx(self, changeid=None):
476 475 return context.changectx(self, changeid)
477 476
478 477 def workingctx(self):
479 478 return context.workingctx(self)
480 479
481 480 def parents(self, changeid=None):
482 481 '''
483 482 get list of changectxs for parents of changeid or working directory
484 483 '''
485 484 if changeid is None:
486 485 pl = self.dirstate.parents()
487 486 else:
488 487 n = self.changelog.lookup(changeid)
489 488 pl = self.changelog.parents(n)
490 489 if pl[1] == nullid:
491 490 return [self.changectx(pl[0])]
492 491 return [self.changectx(pl[0]), self.changectx(pl[1])]
493 492
494 493 def filectx(self, path, changeid=None, fileid=None):
495 494 """changeid can be a changeset revision, node, or tag.
496 495 fileid can be a file revision or node."""
497 496 return context.filectx(self, path, changeid, fileid)
498 497
499 498 def getcwd(self):
500 499 return self.dirstate.getcwd()
501 500
502 501 def wfile(self, f, mode='r'):
503 502 return self.wopener(f, mode)
504 503
505 504 def _filter(self, filter, filename, data):
506 505 if filter not in self.filterpats:
507 506 l = []
508 507 for pat, cmd in self.ui.configitems(filter):
509 508 mf = util.matcher(self.root, "", [pat], [], [])[1]
510 509 l.append((mf, cmd))
511 510 self.filterpats[filter] = l
512 511
513 512 for mf, cmd in self.filterpats[filter]:
514 513 if mf(filename):
515 514 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
516 515 data = util.filter(data, cmd)
517 516 break
518 517
519 518 return data
520 519
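_filter() is driven by hgrc sections named after the filter: wread() applies [encode] patterns and wwrite()/wwritedata() apply [decode] ones, piping file data through the first command whose pattern matches the filename. A hypothetical hgrc sketch (commands illustrative):

    [encode]
    *.txt = tr -d '\r'        # applied when reading from the working dir
    [decode]
    *.txt = unix2dos          # applied when writing to the working dir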
521 520 def wread(self, filename):
522 521 if self._link(filename):
523 522 data = os.readlink(self.wjoin(filename))
524 523 else:
525 524 data = self.wopener(filename, 'r').read()
526 525 return self._filter("encode", filename, data)
527 526
528 527 def wwrite(self, filename, data, flags):
529 528 data = self._filter("decode", filename, data)
530 529 if "l" in flags:
531 530 f = self.wjoin(filename)
532 531 try:
533 532 os.unlink(f)
534 533 except OSError:
535 534 pass
536 535 d = os.path.dirname(f)
537 536 if not os.path.exists(d):
538 537 os.makedirs(d)
539 538 os.symlink(data, f)
540 539 else:
541 540 try:
542 541 if self._link(filename):
543 542 os.unlink(self.wjoin(filename))
544 543 except OSError:
545 544 pass
546 545 self.wopener(filename, 'w').write(data)
547 546 util.set_exec(self.wjoin(filename), "x" in flags)
548 547
549 548 def wwritedata(self, filename, data):
550 549 return self._filter("decode", filename, data)
551 550
552 551 def transaction(self):
553 552 tr = self.transhandle
554 553 if tr != None and tr.running():
555 554 return tr.nest()
556 555
557 556 # save dirstate for rollback
558 557 try:
559 558 ds = self.opener("dirstate").read()
560 559 except IOError:
561 560 ds = ""
562 561 self.opener("journal.dirstate", "w").write(ds)
563 562
564 563 renames = [(self.sjoin("journal"), self.sjoin("undo")),
565 564 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
566 565 tr = transaction.transaction(self.ui.warn, self.sopener,
567 566 self.sjoin("journal"),
568 567 aftertrans(renames))
569 568 self.transhandle = tr
570 569 return tr
571 570
572 571 def recover(self):
573 572 l = self.lock()
574 573 if os.path.exists(self.sjoin("journal")):
575 574 self.ui.status(_("rolling back interrupted transaction\n"))
576 575 transaction.rollback(self.sopener, self.sjoin("journal"))
577 576 self.reload()
578 577 return True
579 578 else:
580 579 self.ui.warn(_("no interrupted transaction available\n"))
581 580 return False
582 581
583 582 def rollback(self, wlock=None):
584 583 if not wlock:
585 584 wlock = self.wlock()
586 585 l = self.lock()
587 586 if os.path.exists(self.sjoin("undo")):
588 587 self.ui.status(_("rolling back last transaction\n"))
589 588 transaction.rollback(self.sopener, self.sjoin("undo"))
590 589 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
591 590 self.reload()
592 591 self.wreload()
593 592 else:
594 593 self.ui.warn(_("no rollback information available\n"))
595 594
596 595 def wreload(self):
597 596 self.dirstate.read()
598 597
599 598 def reload(self):
600 599 self.changelog.load()
601 600 self.manifest.load()
602 601 self.tagscache = None
603 602 self.nodetagscache = None
604 603
605 604 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
606 605 desc=None):
607 606 try:
608 607 l = lock.lock(lockname, 0, releasefn, desc=desc)
609 608 except lock.LockHeld, inst:
610 609 if not wait:
611 610 raise
612 611 self.ui.warn(_("waiting for lock on %s held by %r\n") %
613 612 (desc, inst.locker))
614 613 # default to 600 seconds timeout
615 614 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
616 615 releasefn, desc=desc)
617 616 if acquirefn:
618 617 acquirefn()
619 618 return l
620 619
621 620 def lock(self, wait=1):
622 621 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
623 622 desc=_('repository %s') % self.origroot)
624 623
625 624 def wlock(self, wait=1):
626 625 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
627 626 self.wreload,
628 627 desc=_('working directory of %s') % self.origroot)
629 628
630 629 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
631 630 """
632 631 commit an individual file as part of a larger transaction
633 632 """
634 633
635 634 t = self.wread(fn)
636 635 fl = self.file(fn)
637 636 fp1 = manifest1.get(fn, nullid)
638 637 fp2 = manifest2.get(fn, nullid)
639 638
640 639 meta = {}
641 640 cp = self.dirstate.copied(fn)
642 641 if cp:
643 642 # Mark the new revision of this file as a copy of another
644 643 # file. This copy data will effectively act as a parent
645 644 # of this new revision. If this is a merge, the first
646 645 # parent will be the nullid (meaning "look up the copy data")
647 646 # and the second one will be the other parent. For example:
648 647 #
649 648 # 0 --- 1 --- 3 rev1 changes file foo
650 649 # \ / rev2 renames foo to bar and changes it
651 650 # \- 2 -/ rev3 should have bar with all changes and
652 651 # should record that bar descends from
653 652 # bar in rev2 and foo in rev1
654 653 #
655 654 # this allows this merge to succeed:
656 655 #
657 656 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
658 657 # \ / merging rev3 and rev4 should use bar@rev2
659 658 # \- 2 --- 4 as the merge base
660 659 #
661 660 meta["copy"] = cp
662 661 if not manifest2: # not a branch merge
663 662 meta["copyrev"] = hex(manifest1.get(cp, nullid))
664 663 fp2 = nullid
665 664 elif fp2 != nullid: # copied on remote side
666 665 meta["copyrev"] = hex(manifest1.get(cp, nullid))
667 666 elif fp1 != nullid: # copied on local side, reversed
668 667 meta["copyrev"] = hex(manifest2.get(cp))
669 668 fp2 = fp1
670 669 else: # directory rename
671 670 meta["copyrev"] = hex(manifest1.get(cp, nullid))
672 671 self.ui.debug(_(" %s: copy %s:%s\n") %
673 672 (fn, cp, meta["copyrev"]))
674 673 fp1 = nullid
675 674 elif fp2 != nullid:
676 675 # is one parent an ancestor of the other?
677 676 fpa = fl.ancestor(fp1, fp2)
678 677 if fpa == fp1:
679 678 fp1, fp2 = fp2, nullid
680 679 elif fpa == fp2:
681 680 fp2 = nullid
682 681
683 682 # is the file unmodified from the parent? report existing entry
684 683 if fp2 == nullid and not fl.cmp(fp1, t):
685 684 return fp1
686 685
687 686 changelist.append(fn)
688 687 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
689 688
690 689 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
691 690 if p1 is None:
692 691 p1, p2 = self.dirstate.parents()
693 692 return self.commit(files=files, text=text, user=user, date=date,
694 693 p1=p1, p2=p2, wlock=wlock, extra=extra)
695 694
696 695 def commit(self, files=None, text="", user=None, date=None,
697 696 match=util.always, force=False, lock=None, wlock=None,
698 697 force_editor=False, p1=None, p2=None, extra={}):
699 698
700 699 commit = []
701 700 remove = []
702 701 changed = []
703 702 use_dirstate = (p1 is None) # not rawcommit
704 703 extra = extra.copy()
705 704
706 705 if use_dirstate:
707 706 if files:
708 707 for f in files:
709 708 s = self.dirstate.state(f)
710 709 if s in 'nmai':
711 710 commit.append(f)
712 711 elif s == 'r':
713 712 remove.append(f)
714 713 else:
715 714 self.ui.warn(_("%s not tracked!\n") % f)
716 715 else:
717 716 changes = self.status(match=match)[:5]
718 717 modified, added, removed, deleted, unknown = changes
719 718 commit = modified + added
720 719 remove = removed
721 720 else:
722 721 commit = files
723 722
724 723 if use_dirstate:
725 724 p1, p2 = self.dirstate.parents()
726 725 update_dirstate = True
727 726 else:
728 727 p1, p2 = p1, p2 or nullid
729 728 update_dirstate = (self.dirstate.parents()[0] == p1)
730 729
731 730 c1 = self.changelog.read(p1)
732 731 c2 = self.changelog.read(p2)
733 732 m1 = self.manifest.read(c1[0]).copy()
734 733 m2 = self.manifest.read(c2[0])
735 734
736 735 if use_dirstate:
737 736 branchname = self.workingctx().branch()
738 737 try:
739 738 branchname = branchname.decode('UTF-8').encode('UTF-8')
740 739 except UnicodeDecodeError:
741 740 raise util.Abort(_('branch name not in UTF-8!'))
742 741 else:
743 742 branchname = ""
744 743
745 744 if use_dirstate:
746 745 oldname = c1[5].get("branch", "") # stored in UTF-8
747 746 if not commit and not remove and not force and p2 == nullid and \
748 747 branchname == oldname:
749 748 self.ui.status(_("nothing changed\n"))
750 749 return None
751 750
752 751 xp1 = hex(p1)
753 752 if p2 == nullid: xp2 = ''
754 753 else: xp2 = hex(p2)
755 754
756 755 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
757 756
758 757 if not wlock:
759 758 wlock = self.wlock()
760 759 if not lock:
761 760 lock = self.lock()
762 761 tr = self.transaction()
763 762
764 763 # check in files
765 764 new = {}
766 765 linkrev = self.changelog.count()
767 766 commit.sort()
768 767 is_exec = util.execfunc(self.root, m1.execf)
769 768 is_link = util.linkfunc(self.root, m1.linkf)
770 769 for f in commit:
771 770 self.ui.note(f + "\n")
772 771 try:
773 772 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
774 773 m1.set(f, is_exec(f), is_link(f))
775 774 except (OSError, IOError):
776 775 if use_dirstate:
777 776 self.ui.warn(_("trouble committing %s!\n") % f)
778 777 raise
779 778 else:
780 779 remove.append(f)
781 780
782 781 # update manifest
783 782 m1.update(new)
784 783 remove.sort()
785 784 removed = []
786 785
787 786 for f in remove:
788 787 if f in m1:
789 788 del m1[f]
790 789 removed.append(f)
791 790 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
792 791
793 792 # add changeset
794 793 new = new.keys()
795 794 new.sort()
796 795
797 796 user = user or self.ui.username()
798 797 if not text or force_editor:
799 798 edittext = []
800 799 if text:
801 800 edittext.append(text)
802 801 edittext.append("")
803 802 edittext.append("HG: user: %s" % user)
804 803 if p2 != nullid:
805 804 edittext.append("HG: branch merge")
806 805 if branchname:
807 806 edittext.append("HG: branch %s" % util.tolocal(branchname))
808 807 edittext.extend(["HG: changed %s" % f for f in changed])
809 808 edittext.extend(["HG: removed %s" % f for f in removed])
810 809 if not changed and not remove:
811 810 edittext.append("HG: no files changed")
812 811 edittext.append("")
813 812 # run editor in the repository root
814 813 olddir = os.getcwd()
815 814 os.chdir(self.root)
816 815 text = self.ui.edit("\n".join(edittext), user)
817 816 os.chdir(olddir)
818 817
819 818 lines = [line.rstrip() for line in text.rstrip().splitlines()]
820 819 while lines and not lines[0]:
821 820 del lines[0]
822 821 if not lines:
823 822 return None
824 823 text = '\n'.join(lines)
825 824 if branchname:
826 825 extra["branch"] = branchname
827 826 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
828 827 user, date, extra)
829 828 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
830 829 parent2=xp2)
831 830 tr.close()
832 831
833 832 if self.branchcache and "branch" in extra:
834 833 self.branchcache[util.tolocal(extra["branch"])] = n
835 834
836 835 if use_dirstate or update_dirstate:
837 836 self.dirstate.setparents(n)
838 837 if use_dirstate:
839 838 self.dirstate.update(new, "n")
840 839 self.dirstate.forget(removed)
841 840
842 841 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
843 842 return n
844 843
845 844 def walk(self, node=None, files=[], match=util.always, badmatch=None):
846 845 '''
847 846 walk recursively through the directory tree or a given
848 847 changeset, finding all files matched by the match
849 848 function
850 849
851 850 results are yielded in a tuple (src, filename), where src
852 851 is one of:
853 852 'f' the file was found in the directory tree
854 853 'm' the file was only in the dirstate and not in the tree
855 854 'b' file was not found and matched badmatch
856 855 '''
857 856
858 857 if node:
859 858 fdict = dict.fromkeys(files)
860 859 for fn in self.manifest.read(self.changelog.read(node)[0]):
861 860 for ffn in fdict:
862 861 # match if the file is the exact name or a directory
863 862 if ffn == fn or fn.startswith("%s/" % ffn):
864 863 del fdict[ffn]
865 864 break
866 865 if match(fn):
867 866 yield 'm', fn
868 867 for fn in fdict:
869 868 if badmatch and badmatch(fn):
870 869 if match(fn):
871 870 yield 'b', fn
872 871 else:
873 872 self.ui.warn(_('%s: No such file in rev %s\n') % (
874 873 util.pathto(self.getcwd(), fn), short(node)))
875 874 else:
876 875 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
877 876 yield src, fn
878 877
879 878 def status(self, node1=None, node2=None, files=[], match=util.always,
880 879 wlock=None, list_ignored=False, list_clean=False):
881 880 """return status of files between two nodes or node and working directory
882 881
883 882 If node1 is None, use the first dirstate parent instead.
884 883 If node2 is None, compare node1 with working directory.
885 884 """
886 885
887 886 def fcmp(fn, getnode):
888 887 t1 = self.wread(fn)
889 888 return self.file(fn).cmp(getnode(fn), t1)
890 889
891 890 def mfmatches(node):
892 891 change = self.changelog.read(node)
893 892 mf = self.manifest.read(change[0]).copy()
894 893 for fn in mf.keys():
895 894 if not match(fn):
896 895 del mf[fn]
897 896 return mf
898 897
899 898 modified, added, removed, deleted, unknown = [], [], [], [], []
900 899 ignored, clean = [], []
901 900
902 901 compareworking = False
903 902 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
904 903 compareworking = True
905 904
906 905 if not compareworking:
907 906 # read the manifest from node1 before the manifest from node2,
908 907 # so that we'll hit the manifest cache if we're going through
909 908 # all the revisions in parent->child order.
910 909 mf1 = mfmatches(node1)
911 910
912 911 # are we comparing the working directory?
913 912 if not node2:
914 913 if not wlock:
915 914 try:
916 915 wlock = self.wlock(wait=0)
917 916 except lock.LockException:
918 917 wlock = None
919 918 (lookup, modified, added, removed, deleted, unknown,
920 919 ignored, clean) = self.dirstate.status(files, match,
921 920 list_ignored, list_clean)
922 921
923 922 # are we comparing working dir against its parent?
924 923 if compareworking:
925 924 if lookup:
926 925 # do a full compare of any files that might have changed
927 926 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
928 927 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
929 928 nullid)
930 929 for f in lookup:
931 930 if fcmp(f, getnode):
932 931 modified.append(f)
933 932 else:
934 933 clean.append(f)
935 934 if wlock is not None:
936 935 self.dirstate.update([f], "n")
937 936 else:
938 937 # we are comparing working dir against non-parent
939 938 # generate a pseudo-manifest for the working dir
940 939 # XXX: create it in dirstate.py ?
941 940 mf2 = mfmatches(self.dirstate.parents()[0])
942 941 is_exec = util.execfunc(self.root, mf2.execf)
943 942 is_link = util.linkfunc(self.root, mf2.linkf)
944 943 for f in lookup + modified + added:
945 944 mf2[f] = ""
946 945 mf2.set(f, is_exec(f), is_link(f))
947 946 for f in removed:
948 947 if f in mf2:
949 948 del mf2[f]
950 949 else:
951 950 # we are comparing two revisions
952 951 mf2 = mfmatches(node2)
953 952
954 953 if not compareworking:
955 954 # flush lists from dirstate before comparing manifests
956 955 modified, added, clean = [], [], []
957 956
958 957 # make sure to sort the files so we talk to the disk in a
959 958 # reasonable order
960 959 mf2keys = mf2.keys()
961 960 mf2keys.sort()
962 961 getnode = lambda fn: mf1.get(fn, nullid)
963 962 for fn in mf2keys:
964 963 if mf1.has_key(fn):
965 964 if mf1.flags(fn) != mf2.flags(fn) or \
966 965 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
967 966 fcmp(fn, getnode))):
968 967 modified.append(fn)
969 968 elif list_clean:
970 969 clean.append(fn)
971 970 del mf1[fn]
972 971 else:
973 972 added.append(fn)
974 973
975 974 removed = mf1.keys()
976 975
977 976 # sort and return results:
978 977 for l in modified, added, removed, deleted, unknown, ignored, clean:
979 978 l.sort()
980 979 return (modified, added, removed, deleted, unknown, ignored, clean)
981 980
982 981 def add(self, list, wlock=None):
983 982 if not wlock:
984 983 wlock = self.wlock()
985 984 for f in list:
986 985 p = self.wjoin(f)
987 986 islink = os.path.islink(p)
988 987 if not islink and not os.path.exists(p):
989 988 self.ui.warn(_("%s does not exist!\n") % f)
990 989 elif not islink and not os.path.isfile(p):
991 990 self.ui.warn(_("%s not added: only files and symlinks "
992 991 "supported currently\n") % f)
993 992 elif self.dirstate.state(f) in 'an':
994 993 self.ui.warn(_("%s already tracked!\n") % f)
995 994 else:
996 995 self.dirstate.update([f], "a")
997 996
998 997 def forget(self, list, wlock=None):
999 998 if not wlock:
1000 999 wlock = self.wlock()
1001 1000 for f in list:
1002 1001 if self.dirstate.state(f) not in 'ai':
1003 1002 self.ui.warn(_("%s not added!\n") % f)
1004 1003 else:
1005 1004 self.dirstate.forget([f])
1006 1005
1007 1006 def remove(self, list, unlink=False, wlock=None):
1008 1007 if unlink:
1009 1008 for f in list:
1010 1009 try:
1011 1010 util.unlink(self.wjoin(f))
1012 1011 except OSError, inst:
1013 1012 if inst.errno != errno.ENOENT:
1014 1013 raise
1015 1014 if not wlock:
1016 1015 wlock = self.wlock()
1017 1016 for f in list:
1018 1017 p = self.wjoin(f)
1019 1018 if os.path.exists(p):
1020 1019 self.ui.warn(_("%s still exists!\n") % f)
1021 1020 elif self.dirstate.state(f) == 'a':
1022 1021 self.dirstate.forget([f])
1023 1022 elif f not in self.dirstate:
1024 1023 self.ui.warn(_("%s not tracked!\n") % f)
1025 1024 else:
1026 1025 self.dirstate.update([f], "r")
1027 1026
1028 1027 def undelete(self, list, wlock=None):
1029 1028 p = self.dirstate.parents()[0]
1030 1029 mn = self.changelog.read(p)[0]
1031 1030 m = self.manifest.read(mn)
1032 1031 if not wlock:
1033 1032 wlock = self.wlock()
1034 1033 for f in list:
1035 1034 if self.dirstate.state(f) not in "r":
1036 1035 self.ui.warn("%s not removed!\n" % f)
1037 1036 else:
1038 1037 t = self.file(f).read(m[f])
1039 1038 self.wwrite(f, t, m.flags(f))
1040 1039 self.dirstate.update([f], "n")
1041 1040
1042 1041 def copy(self, source, dest, wlock=None):
1043 1042 p = self.wjoin(dest)
1044 1043 if not os.path.exists(p):
1045 1044 self.ui.warn(_("%s does not exist!\n") % dest)
1046 1045 elif not os.path.isfile(p):
1047 1046 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1048 1047 else:
1049 1048 if not wlock:
1050 1049 wlock = self.wlock()
1051 1050 if self.dirstate.state(dest) == '?':
1052 1051 self.dirstate.update([dest], "a")
1053 1052 self.dirstate.copy(source, dest)
1054 1053
1055 1054 def heads(self, start=None):
1056 1055 heads = self.changelog.heads(start)
1057 1056 # sort the output in rev descending order
1058 1057 heads = [(-self.changelog.rev(h), h) for h in heads]
1059 1058 heads.sort()
1060 1059 return [n for (r, n) in heads]
1061 1060
1062 1061 def branches(self, nodes):
1063 1062 if not nodes:
1064 1063 nodes = [self.changelog.tip()]
1065 1064 b = []
1066 1065 for n in nodes:
1067 1066 t = n
1068 1067 while 1:
1069 1068 p = self.changelog.parents(n)
1070 1069 if p[1] != nullid or p[0] == nullid:
1071 1070 b.append((t, n, p[0], p[1]))
1072 1071 break
1073 1072 n = p[0]
1074 1073 return b
1075 1074
1076 1075 def between(self, pairs):
1077 1076 r = []
1078 1077
1079 1078 for top, bottom in pairs:
1080 1079 n, l, i = top, [], 0
1081 1080 f = 1
1082 1081
1083 1082 while n != bottom:
1084 1083 p = self.changelog.parents(n)[0]
1085 1084 if i == f:
1086 1085 l.append(n)
1087 1086 f = f * 2
1088 1087 n = p
1089 1088 i += 1
1090 1089
1091 1090 r.append(l)
1092 1091
1093 1092 return r
1094 1093
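between() walks first parents from each top node toward bottom, recording the nodes at exponentially growing distances (1, 2, 4, 8, ...); findincoming() below uses these samples to binary-search for the first unknown changeset on a branch. A toy model of the sampling, assuming node n's first parent is simply n - 1:

    def between_one(top, bottom):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:            # record at distances 1, 2, 4, 8, ...
                l.append(n)
                f *= 2
            n -= 1                # step to the first parent
            i += 1
        return l

    print between_one(10, 0)      # [9, 8, 6, 2]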
1095 1094 def findincoming(self, remote, base=None, heads=None, force=False):
1096 1095 """Return list of roots of the subsets of missing nodes from remote
1097 1096
1098 1097 If base dict is specified, assume that these nodes and their parents
1099 1098 exist on the remote side and that no child of a node of base exists
1100 1099 in both remote and self.
1101 1100 Furthermore, base will be updated to include the nodes that exist
1102 1101 in both self and remote but have no children that exist in both.
1103 1102 If a list of heads is specified, return only nodes which are heads
1104 1103 or ancestors of these heads.
1105 1104
1106 1105 All the ancestors of base are in self and in remote.
1107 1106 All the descendants of the list returned are missing in self.
1108 1107 (and so we know that the rest of the nodes are missing in remote, see
1109 1108 outgoing)
1110 1109 """
1111 1110 m = self.changelog.nodemap
1112 1111 search = []
1113 1112 fetch = {}
1114 1113 seen = {}
1115 1114 seenbranch = {}
1116 1115 if base == None:
1117 1116 base = {}
1118 1117
1119 1118 if not heads:
1120 1119 heads = remote.heads()
1121 1120
1122 1121 if self.changelog.tip() == nullid:
1123 1122 base[nullid] = 1
1124 1123 if heads != [nullid]:
1125 1124 return [nullid]
1126 1125 return []
1127 1126
1128 1127 # assume we're closer to the tip than the root
1129 1128 # and start by examining the heads
1130 1129 self.ui.status(_("searching for changes\n"))
1131 1130
1132 1131 unknown = []
1133 1132 for h in heads:
1134 1133 if h not in m:
1135 1134 unknown.append(h)
1136 1135 else:
1137 1136 base[h] = 1
1138 1137
1139 1138 if not unknown:
1140 1139 return []
1141 1140
1142 1141 req = dict.fromkeys(unknown)
1143 1142 reqcnt = 0
1144 1143
1145 1144 # search through remote branches
1146 1145 # a 'branch' here is a linear segment of history, with four parts:
1147 1146 # head, root, first parent, second parent
1148 1147 # (a branch always has two parents (or none) by definition)
1149 1148 unknown = remote.branches(unknown)
1150 1149 while unknown:
1151 1150 r = []
1152 1151 while unknown:
1153 1152 n = unknown.pop(0)
1154 1153 if n[0] in seen:
1155 1154 continue
1156 1155
1157 1156 self.ui.debug(_("examining %s:%s\n")
1158 1157 % (short(n[0]), short(n[1])))
1159 1158 if n[0] == nullid: # found the end of the branch
1160 1159 pass
1161 1160 elif n in seenbranch:
1162 1161 self.ui.debug(_("branch already found\n"))
1163 1162 continue
1164 1163 elif n[1] and n[1] in m: # do we know the base?
1165 1164 self.ui.debug(_("found incomplete branch %s:%s\n")
1166 1165 % (short(n[0]), short(n[1])))
1167 1166 search.append(n) # schedule branch range for scanning
1168 1167 seenbranch[n] = 1
1169 1168 else:
1170 1169 if n[1] not in seen and n[1] not in fetch:
1171 1170 if n[2] in m and n[3] in m:
1172 1171 self.ui.debug(_("found new changeset %s\n") %
1173 1172 short(n[1]))
1174 1173 fetch[n[1]] = 1 # earliest unknown
1175 1174 for p in n[2:4]:
1176 1175 if p in m:
1177 1176 base[p] = 1 # latest known
1178 1177
1179 1178 for p in n[2:4]:
1180 1179 if p not in req and p not in m:
1181 1180 r.append(p)
1182 1181 req[p] = 1
1183 1182 seen[n[0]] = 1
1184 1183
1185 1184 if r:
1186 1185 reqcnt += 1
1187 1186 self.ui.debug(_("request %d: %s\n") %
1188 1187 (reqcnt, " ".join(map(short, r))))
1189 1188 for p in xrange(0, len(r), 10):
1190 1189 for b in remote.branches(r[p:p+10]):
1191 1190 self.ui.debug(_("received %s:%s\n") %
1192 1191 (short(b[0]), short(b[1])))
1193 1192 unknown.append(b)
1194 1193
1195 1194 # do binary search on the branches we found
1196 1195 while search:
1197 1196 n = search.pop(0)
1198 1197 reqcnt += 1
1199 1198 l = remote.between([(n[0], n[1])])[0]
1200 1199 l.append(n[1])
1201 1200 p = n[0]
1202 1201 f = 1
1203 1202 for i in l:
1204 1203 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1205 1204 if i in m:
1206 1205 if f <= 2:
1207 1206 self.ui.debug(_("found new branch changeset %s\n") %
1208 1207 short(p))
1209 1208 fetch[p] = 1
1210 1209 base[i] = 1
1211 1210 else:
1212 1211 self.ui.debug(_("narrowed branch search to %s:%s\n")
1213 1212 % (short(p), short(i)))
1214 1213 search.append((p, i))
1215 1214 break
1216 1215 p, f = i, f * 2
1217 1216
1218 1217 # sanity check our fetch list
1219 1218 for f in fetch.keys():
1220 1219 if f in m:
1221 1220 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1222 1221
1223 1222 if base.keys() == [nullid]:
1224 1223 if force:
1225 1224 self.ui.warn(_("warning: repository is unrelated\n"))
1226 1225 else:
1227 1226 raise util.Abort(_("repository is unrelated"))
1228 1227
1229 1228 self.ui.debug(_("found new changesets starting at ") +
1230 1229 " ".join([short(f) for f in fetch]) + "\n")
1231 1230
1232 1231 self.ui.debug(_("%d total queries\n") % reqcnt)
1233 1232
1234 1233 return fetch.keys()
1235 1234
1236 1235 def findoutgoing(self, remote, base=None, heads=None, force=False):
1237 1236 """Return list of nodes that are roots of subsets not in remote
1238 1237
1239 1238 If base dict is specified, assume that these nodes and their parents
1240 1239 exist on the remote side.
1241 1240 If a list of heads is specified, return only nodes which are heads
1242 1241 or ancestors of these heads, and return a second element which
1243 1242 contains all remote heads which get new children.
1244 1243 """
1245 1244 if base == None:
1246 1245 base = {}
1247 1246 self.findincoming(remote, base, heads, force=force)
1248 1247
1249 1248 self.ui.debug(_("common changesets up to ")
1250 1249 + " ".join(map(short, base.keys())) + "\n")
1251 1250
1252 1251 remain = dict.fromkeys(self.changelog.nodemap)
1253 1252
1254 1253 # prune everything remote has from the tree
1255 1254 del remain[nullid]
1256 1255 remove = base.keys()
1257 1256 while remove:
1258 1257 n = remove.pop(0)
1259 1258 if n in remain:
1260 1259 del remain[n]
1261 1260 for p in self.changelog.parents(n):
1262 1261 remove.append(p)
1263 1262
1264 1263 # find every node whose parents have been pruned
1265 1264 subset = []
1266 1265 # find every remote head that will get new children
1267 1266 updated_heads = {}
1268 1267 for n in remain:
1269 1268 p1, p2 = self.changelog.parents(n)
1270 1269 if p1 not in remain and p2 not in remain:
1271 1270 subset.append(n)
1272 1271 if heads:
1273 1272 if p1 in heads:
1274 1273 updated_heads[p1] = True
1275 1274 if p2 in heads:
1276 1275 updated_heads[p2] = True
1277 1276
1278 1277 # this is the set of all roots we have to push
1279 1278 if heads:
1280 1279 return subset, updated_heads.keys()
1281 1280 else:
1282 1281 return subset
1283 1282
1284 1283 def pull(self, remote, heads=None, force=False, lock=None):
1285 1284 mylock = False
1286 1285 if not lock:
1287 1286 lock = self.lock()
1288 1287 mylock = True
1289 1288
1290 1289 try:
1291 1290 fetch = self.findincoming(remote, force=force)
1292 1291 if fetch == [nullid]:
1293 1292 self.ui.status(_("requesting all changes\n"))
1294 1293
1295 1294 if not fetch:
1296 1295 self.ui.status(_("no changes found\n"))
1297 1296 return 0
1298 1297
1299 1298 if heads is None:
1300 1299 cg = remote.changegroup(fetch, 'pull')
1301 1300 else:
1302 1301 if 'changegroupsubset' not in remote.capabilities:
1303 1302 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1304 1303 cg = remote.changegroupsubset(fetch, heads, 'pull')
1305 1304 return self.addchangegroup(cg, 'pull', remote.url())
1306 1305 finally:
1307 1306 if mylock:
1308 1307 lock.release()
1309 1308
1310 1309 def push(self, remote, force=False, revs=None):
1311 1310 # there are two ways to push to remote repo:
1312 1311 #
1313 1312 # addchangegroup assumes local user can lock remote
1314 1313 # repo (local filesystem, old ssh servers).
1315 1314 #
1316 1315 # unbundle assumes local user cannot lock remote repo (new ssh
1317 1316 # servers, http servers).
1318 1317
1319 1318 if remote.capable('unbundle'):
1320 1319 return self.push_unbundle(remote, force, revs)
1321 1320 return self.push_addchangegroup(remote, force, revs)
1322 1321
1323 1322 def prepush(self, remote, force, revs):
1324 1323 base = {}
1325 1324 remote_heads = remote.heads()
1326 1325 inc = self.findincoming(remote, base, remote_heads, force=force)
1327 1326
1328 1327 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1329 1328 if revs is not None:
1330 1329 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1331 1330 else:
1332 1331 bases, heads = update, self.changelog.heads()
1333 1332
1334 1333 if not bases:
1335 1334 self.ui.status(_("no changes found\n"))
1336 1335 return None, 1
1337 1336 elif not force:
1338 1337 # check if we're creating new remote heads
1339 1338 # to be a remote head after push, node must be either
1340 1339 # - unknown locally
1341 1340 # - a local outgoing head descended from update
1342 1341 # - a remote head that's known locally and not
1343 1342 # ancestral to an outgoing head
1344 1343
1345 1344 warn = 0
1346 1345
1347 1346 if remote_heads == [nullid]:
1348 1347 warn = 0
1349 1348 elif not revs and len(heads) > len(remote_heads):
1350 1349 warn = 1
1351 1350 else:
1352 1351 newheads = list(heads)
1353 1352 for r in remote_heads:
1354 1353 if r in self.changelog.nodemap:
1355 1354 desc = self.changelog.heads(r, heads)
1356 1355 l = [h for h in heads if h in desc]
1357 1356 if not l:
1358 1357 newheads.append(r)
1359 1358 else:
1360 1359 newheads.append(r)
1361 1360 if len(newheads) > len(remote_heads):
1362 1361 warn = 1
1363 1362
1364 1363 if warn:
1365 1364 self.ui.warn(_("abort: push creates new remote branches!\n"))
1366 1365 self.ui.status(_("(did you forget to merge?"
1367 1366 " use push -f to force)\n"))
1368 1367 return None, 1
1369 1368 elif inc:
1370 1369 self.ui.warn(_("note: unsynced remote changes!\n"))
1371 1370
1372 1371
1373 1372 if revs is None:
1374 1373 cg = self.changegroup(update, 'push')
1375 1374 else:
1376 1375 cg = self.changegroupsubset(update, revs, 'push')
1377 1376 return cg, remote_heads
1378 1377
1379 1378 def push_addchangegroup(self, remote, force, revs):
1380 1379 lock = remote.lock()
1381 1380
1382 1381 ret = self.prepush(remote, force, revs)
1383 1382 if ret[0] is not None:
1384 1383 cg, remote_heads = ret
1385 1384 return remote.addchangegroup(cg, 'push', self.url())
1386 1385 return ret[1]
1387 1386
1388 1387 def push_unbundle(self, remote, force, revs):
1389 1388 # local repo finds heads on server, finds out what revs it
1390 1389 # must push. once revs transferred, if server finds it has
1391 1390 # different heads (someone else won commit/push race), server
1392 1391 # aborts.
1393 1392
1394 1393 ret = self.prepush(remote, force, revs)
1395 1394 if ret[0] is not None:
1396 1395 cg, remote_heads = ret
1397 1396 if force: remote_heads = ['force']
1398 1397 return remote.unbundle(cg, remote_heads, 'push')
1399 1398 return ret[1]
1400 1399
1401 1400 def changegroupinfo(self, nodes):
1402 1401 self.ui.note(_("%d changesets found\n") % len(nodes))
1403 1402 if self.ui.debugflag:
1404 1403 self.ui.debug(_("List of changesets:\n"))
1405 1404 for node in nodes:
1406 1405 self.ui.debug("%s\n" % hex(node))
1407 1406
1408 1407 def changegroupsubset(self, bases, heads, source):
1409 1408 """This function generates a changegroup consisting of all the nodes
1410 1409 that are descendants of any of the bases, and ancestors of any of
1411 1410 the heads.
1412 1411
1413 1412 It is fairly complex as determining which filenodes and which
1414 1413 manifest nodes need to be included for the changeset to be complete
1415 1414 is non-trivial.
1416 1415
1417 1416 Another wrinkle is doing the reverse, figuring out which changeset in
1418 1417 the changegroup a particular filenode or manifestnode belongs to."""
1419 1418
1420 1419 self.hook('preoutgoing', throw=True, source=source)
1421 1420
1422 1421 # Set up some initial variables
1423 1422 # Make it easy to refer to self.changelog
1424 1423 cl = self.changelog
1425 1424 # msng is short for missing - compute the list of changesets in this
1426 1425 # changegroup.
1427 1426 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1428 1427 self.changegroupinfo(msng_cl_lst)
1429 1428 # Some bases may turn out to be superfluous, and some heads may be
1430 1429 # too. nodesbetween will return the minimal set of bases and heads
1431 1430 # necessary to re-create the changegroup.
1432 1431
1433 1432 # Known heads are the list of heads that it is assumed the recipient
1434 1433 # of this changegroup will know about.
1435 1434 knownheads = {}
1436 1435 # We assume that all parents of bases are known heads.
1437 1436 for n in bases:
1438 1437 for p in cl.parents(n):
1439 1438 if p != nullid:
1440 1439 knownheads[p] = 1
1441 1440 knownheads = knownheads.keys()
1442 1441 if knownheads:
1443 1442 # Now that we know what heads are known, we can compute which
1444 1443 # changesets are known. The recipient must know about all
1445 1444 # changesets required to reach the known heads from the null
1446 1445 # changeset.
1447 1446 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1448 1447 junk = None
1449 1448 # Transform the list into an ersatz set.
1450 1449 has_cl_set = dict.fromkeys(has_cl_set)
1451 1450 else:
1452 1451 # If there were no known heads, the recipient cannot be assumed to
1453 1452 # know about any changesets.
1454 1453 has_cl_set = {}
1455 1454
1456 1455 # Make it easy to refer to self.manifest
1457 1456 mnfst = self.manifest
1458 1457 # We don't know which manifests are missing yet
1459 1458 msng_mnfst_set = {}
1460 1459 # Nor do we know which filenodes are missing.
1461 1460 msng_filenode_set = {}
1462 1461
1463 1462 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1464 1463 junk = None
1465 1464
1466 1465 # A changeset always belongs to itself, so the changenode lookup
1467 1466 # function for a changenode is identity.
1468 1467 def identity(x):
1469 1468 return x
1470 1469
1471 1470 # A function generating function. Sets up an environment for the
1472 1471 # inner function.
1473 1472 def cmp_by_rev_func(revlog):
1474 1473 # Compare two nodes by their revision number in the environment's
1475 1474 # revision history. Since the revision number both represents the
1476 1475 # most efficient order to read the nodes in, and represents a
1477 1476 # topological sorting of the nodes, this function is often useful.
1478 1477 def cmp_by_rev(a, b):
1479 1478 return cmp(revlog.rev(a), revlog.rev(b))
1480 1479 return cmp_by_rev
1481 1480
1482 1481 # If we determine that a particular file or manifest node must be a
1483 1482 # node that the recipient of the changegroup will already have, we can
1484 1483 # also assume the recipient will have all the parents. This function
1485 1484 # prunes them from the set of missing nodes.
1486 1485 def prune_parents(revlog, hasset, msngset):
1487 1486 haslst = hasset.keys()
1488 1487 haslst.sort(cmp_by_rev_func(revlog))
1489 1488 for node in haslst:
1490 1489 parentlst = [p for p in revlog.parents(node) if p != nullid]
1491 1490 while parentlst:
1492 1491 n = parentlst.pop()
1493 1492 if n not in hasset:
1494 1493 hasset[n] = 1
1495 1494 p = [p for p in revlog.parents(n) if p != nullid]
1496 1495 parentlst.extend(p)
1497 1496 for n in hasset:
1498 1497 msngset.pop(n, None)
1499 1498
1500 1499 # This is a function generating function used to set up an environment
1501 1500 # for the inner function to execute in.
1502 1501 def manifest_and_file_collector(changedfileset):
1503 1502 # This is an information gathering function that gathers
1504 1503 # information from each changeset node that goes out as part of
1505 1504 # the changegroup. The information gathered is a list of which
1506 1505 # manifest nodes are potentially required (the recipient may
1507 1506 # already have them) and total list of all files which were
1508 1507 # changed in any changeset in the changegroup.
1509 1508 #
1510 1509 # We also remember the first changenode each manifest was
1511 1510 # referenced by, so we can later determine which changenode 'owns'
1512 1511 # the manifest.
1513 1512 def collect_manifests_and_files(clnode):
1514 1513 c = cl.read(clnode)
1515 1514 for f in c[3]:
1516 1515 # This is to make sure we only have one instance of each
1517 1516 # filename string for each filename.
1518 1517 changedfileset.setdefault(f, f)
1519 1518 msng_mnfst_set.setdefault(c[0], clnode)
1520 1519 return collect_manifests_and_files
1521 1520
1522 1521 # Figure out which manifest nodes (of the ones we think might be part
1523 1522 # of the changegroup) the recipient must know about and remove them
1524 1523 # from the changegroup.
1525 1524 def prune_manifests():
1526 1525 has_mnfst_set = {}
1527 1526 for n in msng_mnfst_set:
1528 1527 # If a 'missing' manifest thinks it belongs to a changenode
1529 1528 # the recipient is assumed to have, obviously the recipient
1530 1529 # must have that manifest.
1531 1530 linknode = cl.node(mnfst.linkrev(n))
1532 1531 if linknode in has_cl_set:
1533 1532 has_mnfst_set[n] = 1
1534 1533 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1535 1534
1536 1535 # Use the information collected in collect_manifests_and_files to say
1537 1536 # which changenode any manifestnode belongs to.
1538 1537 def lookup_manifest_link(mnfstnode):
1539 1538 return msng_mnfst_set[mnfstnode]
1540 1539
1541 1540 # A function generating function that sets up the initial environment
1542 1541 # for the inner function.
1543 1542 def filenode_collector(changedfiles):
1544 1543 next_rev = [0]
1545 1544 # This gathers information from each manifestnode included in the
1546 1545 # changegroup about which filenodes the manifest node references
1547 1546 # so we can include those in the changegroup too.
1548 1547 #
1549 1548 # It also remembers which changenode each filenode belongs to. It
1550 1549 # does this by assuming that a filenode belongs to the changenode
1551 1550 # the first manifest that references it belongs to.
1552 1551 def collect_msng_filenodes(mnfstnode):
1553 1552 r = mnfst.rev(mnfstnode)
1554 1553 if r == next_rev[0]:
1555 1554 # If the last rev we looked at was the one just previous,
1556 1555 # we only need to see a diff.
1557 1556 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1558 1557 # For each line in the delta
1559 1558 for dline in delta.splitlines():
1560 1559 # get the filename and filenode for that line
1561 1560 f, fnode = dline.split('\0')
1562 1561 fnode = bin(fnode[:40])
1563 1562 f = changedfiles.get(f, None)
1564 1563 # And if the file is in the list of files we care
1565 1564 # about.
1566 1565 if f is not None:
1567 1566 # Get the changenode this manifest belongs to
1568 1567 clnode = msng_mnfst_set[mnfstnode]
1569 1568 # Create the set of filenodes for the file if
1570 1569 # there isn't one already.
1571 1570 ndset = msng_filenode_set.setdefault(f, {})
1572 1571 # And set the filenode's changelog node to the
1573 1572 # manifest's if it hasn't been set already.
1574 1573 ndset.setdefault(fnode, clnode)
1575 1574 else:
1576 1575 # Otherwise we need a full manifest.
1577 1576 m = mnfst.read(mnfstnode)
1578 1577 # For every file we care about.
1579 1578 for f in changedfiles:
1580 1579 fnode = m.get(f, None)
1581 1580 # If it's in the manifest
1582 1581 if fnode is not None:
1583 1582 # See comments above.
1584 1583 clnode = msng_mnfst_set[mnfstnode]
1585 1584 ndset = msng_filenode_set.setdefault(f, {})
1586 1585 ndset.setdefault(fnode, clnode)
1587 1586 # Remember the revision we hope to see next.
1588 1587 next_rev[0] = r + 1
1589 1588 return collect_msng_filenodes
1590 1589
1591 1590 # We have a list of filenodes we think we need for a file, let's remove
1592 1591 # all those we know the recipient must have.
1593 1592 def prune_filenodes(f, filerevlog):
1594 1593 msngset = msng_filenode_set[f]
1595 1594 hasset = {}
1596 1595 # If a 'missing' filenode thinks it belongs to a changenode we
1597 1596 # assume the recipient must have, then the recipient must have
1598 1597 # that filenode.
1599 1598 for n in msngset:
1600 1599 clnode = cl.node(filerevlog.linkrev(n))
1601 1600 if clnode in has_cl_set:
1602 1601 hasset[n] = 1
1603 1602 prune_parents(filerevlog, hasset, msngset)
1604 1603
1605 1604 # A function generating function that sets up a context for the
1606 1605 # inner function.
1607 1606 def lookup_filenode_link_func(fname):
1608 1607 msngset = msng_filenode_set[fname]
1609 1608 # Lookup the changenode the filenode belongs to.
1610 1609 def lookup_filenode_link(fnode):
1611 1610 return msngset[fnode]
1612 1611 return lookup_filenode_link
1613 1612
1614 1613 # Now that we have all these utility functions to help out and
1615 1614 # logically divide up the task, generate the group.
1616 1615 def gengroup():
1617 1616 # The set of changed files starts empty.
1618 1617 changedfiles = {}
1619 1618 # Create a changenode group generator that will call our functions
1620 1619 # back to lookup the owning changenode and collect information.
1621 1620 group = cl.group(msng_cl_lst, identity,
1622 1621 manifest_and_file_collector(changedfiles))
1623 1622 for chnk in group:
1624 1623 yield chnk
1625 1624
1626 1625 # The list of manifests has been collected by the generator
1627 1626 # calling our functions back.
1628 1627 prune_manifests()
1629 1628 msng_mnfst_lst = msng_mnfst_set.keys()
1630 1629 # Sort the manifestnodes by revision number.
1631 1630 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1632 1631 # Create a generator for the manifestnodes that calls our lookup
1633 1632 # and data collection functions back.
1634 1633 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1635 1634 filenode_collector(changedfiles))
1636 1635 for chnk in group:
1637 1636 yield chnk
1638 1637
1639 1638 # These are no longer needed, dereference and toss the memory for
1640 1639 # them.
1641 1640 msng_mnfst_lst = None
1642 1641 msng_mnfst_set.clear()
1643 1642
1644 1643 changedfiles = changedfiles.keys()
1645 1644 changedfiles.sort()
1646 1645 # Go through all our files in order sorted by name.
1647 1646 for fname in changedfiles:
1648 1647 filerevlog = self.file(fname)
1649 1648 # Toss out the filenodes that the recipient isn't really
1650 1649 # missing.
1651 1650 if msng_filenode_set.has_key(fname):
1652 1651 prune_filenodes(fname, filerevlog)
1653 1652 msng_filenode_lst = msng_filenode_set[fname].keys()
1654 1653 else:
1655 1654 msng_filenode_lst = []
1656 1655 # If any filenodes are left, generate the group for them,
1657 1656 # otherwise don't bother.
1658 1657 if len(msng_filenode_lst) > 0:
1659 1658 yield changegroup.genchunk(fname)
1660 1659 # Sort the filenodes by revision number.
1661 1660 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1662 1661 # Create a group generator and only pass in a changenode
1663 1662 # lookup function, as we don't need to collect any
1664 1663 # information from filenodes.
1665 1664 group = filerevlog.group(msng_filenode_lst,
1666 1665 lookup_filenode_link_func(fname))
1667 1666 for chnk in group:
1668 1667 yield chnk
1669 1668 if msng_filenode_set.has_key(fname):
1670 1669 # Don't need this anymore, toss it to free memory.
1671 1670 del msng_filenode_set[fname]
1672 1671 # Signal that no more groups are left.
1673 1672 yield changegroup.closechunk()
1674 1673
1675 1674 if msng_cl_lst:
1676 1675 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1677 1676
1678 1677 return util.chunkbuffer(gengroup())
1679 1678
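util.chunkbuffer wraps the gengroup() generator in a file-like object so callers can read() arbitrary byte counts rather than consume whole chunks. A rough approximation of the idea (the real class lives in util.py; this is a sketch, not its actual code):

    class chunkbuffer(object):
        # file-like adapter over an iterator of strings
        def __init__(self, gen):
            self.iter = iter(gen)
            self.buf = ''
        def read(self, n):
            while len(self.buf) < n:
                try:
                    self.buf += self.iter.next()
                except StopIteration:
                    break
            data, self.buf = self.buf[:n], self.buf[n:]
            return data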
1680 1679 def changegroup(self, basenodes, source):
1681 1680 """Generate a changegroup of all nodes that we have that a recipient
1682 1681 doesn't.
1683 1682
1684 1683 This is much easier than the previous function as we can assume that
1685 1684 the recipient has any changenode we aren't sending them."""
1686 1685
1687 1686 self.hook('preoutgoing', throw=True, source=source)
1688 1687
1689 1688 cl = self.changelog
1690 1689 nodes = cl.nodesbetween(basenodes, None)[0]
1691 1690 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1692 1691 self.changegroupinfo(nodes)
1693 1692
1694 1693 def identity(x):
1695 1694 return x
1696 1695
1697 1696 def gennodelst(revlog):
1698 1697 for r in xrange(0, revlog.count()):
1699 1698 n = revlog.node(r)
1700 1699 if revlog.linkrev(n) in revset:
1701 1700 yield n
1702 1701
1703 1702 def changed_file_collector(changedfileset):
1704 1703 def collect_changed_files(clnode):
1705 1704 c = cl.read(clnode)
1706 1705 for fname in c[3]:
1707 1706 changedfileset[fname] = 1
1708 1707 return collect_changed_files
1709 1708
1710 1709 def lookuprevlink_func(revlog):
1711 1710 def lookuprevlink(n):
1712 1711 return cl.node(revlog.linkrev(n))
1713 1712 return lookuprevlink
1714 1713
1715 1714 def gengroup():
1716 1715 # construct a list of all changed files
1717 1716 changedfiles = {}
1718 1717
1719 1718 for chnk in cl.group(nodes, identity,
1720 1719 changed_file_collector(changedfiles)):
1721 1720 yield chnk
1722 1721 changedfiles = changedfiles.keys()
1723 1722 changedfiles.sort()
1724 1723
1725 1724 mnfst = self.manifest
1726 1725 nodeiter = gennodelst(mnfst)
1727 1726 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1728 1727 yield chnk
1729 1728
1730 1729 for fname in changedfiles:
1731 1730 filerevlog = self.file(fname)
1732 1731 nodeiter = gennodelst(filerevlog)
1733 1732 nodeiter = list(nodeiter)
1734 1733 if nodeiter:
1735 1734 yield changegroup.genchunk(fname)
1736 1735 lookup = lookuprevlink_func(filerevlog)
1737 1736 for chnk in filerevlog.group(nodeiter, lookup):
1738 1737 yield chnk
1739 1738
1740 1739 yield changegroup.closechunk()
1741 1740
1742 1741 if nodes:
1743 1742 self.hook('outgoing', node=hex(nodes[0]), source=source)
1744 1743
1745 1744 return util.chunkbuffer(gengroup())
1746 1745
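Both changegroup generators speak the same chunk framing through changegroup.genchunk and closechunk. From memory of the changegroup module (so treat the details as assumptions): every chunk is a 4-byte big-endian length that counts itself, followed by the payload, and a bare zero length terminates a group:

    import struct

    def genchunk(data):
        # length prefix includes its own 4 bytes
        return struct.pack('>l', len(data) + 4) + data

    def closechunk():
        # zero-length chunk = end-of-group marker
        return struct.pack('>l', 0)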
1747 1746 def addchangegroup(self, source, srctype, url):
1748 1747 """add changegroup to repo.
1749 1748
1750 1749 return values:
1751 1750 - nothing changed or no source: 0
1752 1751 - more heads than before: 1+added heads (2..n)
1753 1752 - fewer heads than before: -1-removed heads (-2..-n)
1754 1753 - number of heads stays the same: 1
1755 1754 """
1756 1755 def csmap(x):
1757 1756 self.ui.debug(_("add changeset %s\n") % short(x))
1758 1757 return cl.count()
1759 1758
1760 1759 def revmap(x):
1761 1760 return cl.rev(x)
1762 1761
1763 1762 if not source:
1764 1763 return 0
1765 1764
1766 1765 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1767 1766
1768 1767 changesets = files = revisions = 0
1769 1768
1770 1769 tr = self.transaction()
1771 1770
1772 1771 # write changelog data to temp files so concurrent readers will not see
1773 1772 # an inconsistent view
1774 1773 cl = None
1775 1774 try:
1776 1775 cl = appendfile.appendchangelog(self.sopener,
1777 1776 self.changelog.version)
1778 1777
1779 1778 oldheads = len(cl.heads())
1780 1779
1781 1780 # pull off the changeset group
1782 1781 self.ui.status(_("adding changesets\n"))
1783 1782 cor = cl.count() - 1
1784 1783 chunkiter = changegroup.chunkiter(source)
1785 1784 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1786 1785 raise util.Abort(_("received changelog group is empty"))
1787 1786 cnr = cl.count() - 1
1788 1787 changesets = cnr - cor
1789 1788
1790 1789 # pull off the manifest group
1791 1790 self.ui.status(_("adding manifests\n"))
1792 1791 chunkiter = changegroup.chunkiter(source)
1793 1792 # no need to check for empty manifest group here:
1794 1793 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1795 1794 # no new manifest will be created and the manifest group will
1796 1795 # be empty during the pull
1797 1796 self.manifest.addgroup(chunkiter, revmap, tr)
1798 1797
1799 1798 # process the files
1800 1799 self.ui.status(_("adding file changes\n"))
1801 1800 while 1:
1802 1801 f = changegroup.getchunk(source)
1803 1802 if not f:
1804 1803 break
1805 1804 self.ui.debug(_("adding %s revisions\n") % f)
1806 1805 fl = self.file(f)
1807 1806 o = fl.count()
1808 1807 chunkiter = changegroup.chunkiter(source)
1809 1808 if fl.addgroup(chunkiter, revmap, tr) is None:
1810 1809 raise util.Abort(_("received file revlog group is empty"))
1811 1810 revisions += fl.count() - o
1812 1811 files += 1
1813 1812
1814 1813 cl.writedata()
1815 1814 finally:
1816 1815 if cl:
1817 1816 cl.cleanup()
1818 1817
1819 1818 # make changelog see real files again
1820 1819 self.changelog = changelog.changelog(self.sopener,
1821 1820 self.changelog.version)
1822 1821 self.changelog.checkinlinesize(tr)
1823 1822
1824 1823 newheads = len(self.changelog.heads())
1825 1824 heads = ""
1826 1825 if oldheads and newheads != oldheads:
1827 1826 heads = _(" (%+d heads)") % (newheads - oldheads)
1828 1827
1829 1828 self.ui.status(_("added %d changesets"
1830 1829 " with %d changes to %d files%s\n")
1831 1830 % (changesets, revisions, files, heads))
1832 1831
1833 1832 if changesets > 0:
1834 1833 self.hook('pretxnchangegroup', throw=True,
1835 1834 node=hex(self.changelog.node(cor+1)), source=srctype,
1836 1835 url=url)
1837 1836
1838 1837 tr.close()
1839 1838
1840 1839 if changesets > 0:
1841 1840 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1842 1841 source=srctype, url=url)
1843 1842
1844 1843 for i in xrange(cor + 1, cnr + 1):
1845 1844 self.hook("incoming", node=hex(self.changelog.node(i)),
1846 1845 source=srctype, url=url)
1847 1846
1848 1847 # never return 0 here:
1849 1848 if newheads < oldheads:
1850 1849 return newheads - oldheads - 1
1851 1850 else:
1852 1851 return newheads - oldheads + 1
1853 1852
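The return-value convention in the docstring packs two facts into one integer: whether anything changed, and how the head count moved. A hypothetical decoder, just to make the encoding concrete:

    def describe_result(ret):
        if ret == 0:
            return 'nothing changed or no source'
        if ret > 0:
            return '%d head(s) added' % (ret - 1)   # ret == 1: same head count
        return '%d head(s) removed' % (-ret - 1)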
1854 1853
1855 1854 def stream_in(self, remote):
1856 1855 fp = remote.stream_out()
1857 1856 l = fp.readline()
1858 1857 try:
1859 1858 resp = int(l)
1860 1859 except ValueError:
1861 1860 raise util.UnexpectedOutput(
1862 1861 _('Unexpected response from remote server:'), l)
1863 1862 if resp == 1:
1864 1863 raise util.Abort(_('operation forbidden by server'))
1865 1864 elif resp == 2:
1866 1865 raise util.Abort(_('locking the remote repository failed'))
1867 1866 elif resp != 0:
1868 1867 raise util.Abort(_('the server sent an unknown error code'))
1869 1868 self.ui.status(_('streaming all changes\n'))
1870 1869 l = fp.readline()
1871 1870 try:
1872 1871 total_files, total_bytes = map(int, l.split(' ', 1))
1873 1872 except (ValueError, TypeError):
1874 1873 raise util.UnexpectedOutput(
1875 1874 _('Unexpected response from remote server:'), l)
1876 1875 self.ui.status(_('%d files to transfer, %s of data\n') %
1877 1876 (total_files, util.bytecount(total_bytes)))
1878 1877 start = time.time()
1879 1878 for i in xrange(total_files):
1880 1879 # XXX doesn't support '\n' or '\r' in filenames
1881 1880 l = fp.readline()
1882 1881 try:
1883 1882 name, size = l.split('\0', 1)
1884 1883 size = int(size)
1885 1884 except (ValueError, TypeError):
1886 1885 raise util.UnexpectedOutput(
1887 1886 _('Unexpected response from remote server:'), l)
1888 1887 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1889 1888 ofp = self.sopener(name, 'w')
1890 1889 for chunk in util.filechunkiter(fp, limit=size):
1891 1890 ofp.write(chunk)
1892 1891 ofp.close()
1893 1892 elapsed = time.time() - start
1894 1893 if elapsed <= 0:
1895 1894 elapsed = 0.001
1896 1895 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1897 1896 (util.bytecount(total_bytes), elapsed,
1898 1897 util.bytecount(total_bytes / elapsed)))
1899 1898 self.reload()
1900 1899 return len(self.heads()) + 1
1901 1900
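The parsing loop above implies the shape of the stream_out wire format: a status line ('0' means success; '1' and '2' are the error codes handled above), a 'total_files total_bytes' line, then per file a 'name\0size' header line followed by exactly size raw bytes. A hedged sketch of the sending side, with an invented entries structure:

    def send_stream(fp, entries):
        # entries: list of (name, data) pairs -- illustrative only
        fp.write('0\n')
        total = sum([len(d) for n, d in entries])
        fp.write('%d %d\n' % (len(entries), total))
        for name, data in entries:
            fp.write('%s\0%d\n' % (name, len(data)))
            fp.write(data)          # exactly len(data) raw bytes follow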
1902 1901 def clone(self, remote, heads=[], stream=False):
1903 1902 '''clone remote repository.
1904 1903
1905 1904 keyword arguments:
1906 1905 heads: list of revs to clone (forces use of pull)
1907 1906 stream: use streaming clone if possible'''
1908 1907
1909 1908 # now, all clients that can request uncompressed clones can
1910 1909 # read repo formats supported by all servers that can serve
1911 1910 # them.
1912 1911
1913 1912 # if revlog format changes, client will have to check version
1914 1913 # and format flags on "stream" capability, and use
1915 1914 # uncompressed only if compatible.
1916 1915
1917 1916 if stream and not heads and remote.capable('stream'):
1918 1917 return self.stream_in(remote)
1919 1918 return self.pull(remote, heads)
1920 1919
1921 1920 # used to avoid circular references so destructors work
1922 1921 def aftertrans(files):
1923 1922 renamefiles = [tuple(t) for t in files]
1924 1923 def a():
1925 1924 for src, dest in renamefiles:
1926 1925 util.rename(src, dest)
1927 1926 return a
1928 1927
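aftertrans returns a plain closure rather than a bound method precisely so the transaction's after-callback does not hold a reference back to the repository, which would form a cycle and defeat the destructor-driven cleanup the comment mentions. A hedged usage sketch inside a hypothetical repo method (the transaction signature and journal/undo names are recalled from memory, so assumptions):

    journal = os.path.join(self.spath, 'journal')
    undo = os.path.join(self.spath, 'undo')
    tr = transaction.transaction(self.ui.warn, self.sopener, journal,
                                 aftertrans([(journal, undo)]))
    # when the transaction completes, the journal is renamed to
    # 'undo', which is what makes rollback possible afterwards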
1929 1928 def instance(ui, path, create):
1930 1929 return localrepository(ui, util.drop_scheme('file', path), create)
1931 1930
1932 1931 def islocal(path):
1933 1932 return True
@@ -1,61 +1,81
1 1 #!/bin/sh
2 2
3 3 # This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 cat <<'EOF' > dummyssh
6 6 #!/bin/sh
7 7 # this attempts to deal with relative pathnames
8 8 cd `dirname $0`
9 9
10 10 # check for proper args
11 11 if [ $1 != "user@dummy" ] ; then
12 12 exit -1
13 13 fi
14 14
15 15 # check that we're in the right directory
16 16 if [ ! -x dummyssh ] ; then
17 17 exit -1
18 18 fi
19 19
20 20 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
21 21 $2
22 22 EOF
23 23 chmod +x dummyssh
24 24
25 checknewrepo()
26 {
27 name=$1
28
29 if [ -d $name/.hg/store ]; then
30 echo store created
31 fi
32
33 if [ -f $name/.hg/00changelog.i ]; then
34 echo 00changelog.i created
35 fi
36
37 cat $name/.hg/requires
38 }
39
25 40 echo "# creating 'local'"
26 41 hg init local
42 checknewrepo local
27 43 echo this > local/foo
28 44 hg ci --cwd local -A -m "init" -d "1000000 0"
29 45
46 echo "# creating repo with old format"
47 hg --config format.usestore=false init old
48 checknewrepo old
49
30 50 echo "#test failure"
31 51 hg init local
32 52
33 53 echo "# init+push to remote2"
34 54 hg init -e ./dummyssh ssh://user@dummy/remote2
35 55 hg incoming -R remote2 local
36 56 hg push -R local -e ./dummyssh ssh://user@dummy/remote2
37 57
38 58 echo "# clone to remote1"
39 59 hg clone -e ./dummyssh local ssh://user@dummy/remote1
40 60
41 61 echo "# init to existing repo"
42 62 hg init -e ./dummyssh ssh://user@dummy/remote1
43 63
44 64 echo "# clone to existing repo"
45 65 hg clone -e ./dummyssh local ssh://user@dummy/remote1
46 66
47 67 echo "# output of dummyssh"
48 68 cat dummylog
49 69
50 70 echo "# comparing repositories"
51 71 hg tip -q -R local
52 72 hg tip -q -R remote1
53 73 hg tip -q -R remote2
54 74
55 75 echo "# check names for repositories (clashes with URL schemes, special chars)"
56 76 for i in bundle file hg http https old-http ssh static-http " " "with space"; do
57 77 echo "# hg init \"$i\""
58 78 hg init "$i"
59 79 test -d "$i" -a -d "$i/.hg" && echo "ok" || echo "failed"
60 80 done
61 81
@@ -1,63 +1,69
1 1 # creating 'local'
2 store created
3 00changelog.i created
4 revlogv1
5 store
2 6 adding foo
7 # creating repo with old format
8 revlogv1
3 9 #test failure
4 10 abort: repository local already exists!
5 11 # init+push to remote2
6 12 comparing with local
7 13 changeset: 0:c4e059d443be
8 14 tag: tip
9 15 user: test
10 16 date: Mon Jan 12 13:46:40 1970 +0000
11 17 summary: init
12 18
13 19 pushing to ssh://user@dummy/remote2
14 20 searching for changes
15 21 remote: adding changesets
16 22 remote: adding manifests
17 23 remote: adding file changes
18 24 remote: added 1 changesets with 1 changes to 1 files
19 25 # clone to remote1
20 26 searching for changes
21 27 remote: adding changesets
22 28 remote: adding manifests
23 29 remote: adding file changes
24 30 remote: added 1 changesets with 1 changes to 1 files
25 31 # init to existing repo
26 32 abort: repository remote1 already exists!
27 33 abort: could not create remote repo!
28 34 # clone to existing repo
29 35 abort: repository remote1 already exists!
30 36 abort: could not create remote repo!
31 37 # output of dummyssh
32 38 Got arguments 1:user@dummy 2:hg init remote2 3: 4: 5:
33 39 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
34 40 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
35 41 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
36 42 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
37 43 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
38 44 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
39 45 # comparing repositories
40 46 0:c4e059d443be
41 47 0:c4e059d443be
42 48 0:c4e059d443be
43 49 # check names for repositories (clashes with URL schemes, special chars)
44 50 # hg init "bundle"
45 51 ok
46 52 # hg init "file"
47 53 ok
48 54 # hg init "hg"
49 55 ok
50 56 # hg init "http"
51 57 ok
52 58 # hg init "https"
53 59 ok
54 60 # hg init "old-http"
55 61 ok
56 62 # hg init "ssh"
57 63 ok
58 64 # hg init "static-http"
59 65 ok
60 66 # hg init " "
61 67 ok
62 68 # hg init "with space"
63 69 ok