localrepo: add separate methods for manipulating repository data...
Matt Mackall
r3457:ff06fe07 default
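The substance of the change: every access to store data (changelog, manifest, filelogs, journal, undo, and the store lock) now goes through a new self.sopener/self.sjoin pair, while the dirstate, hgrc, and the working-directory lock stay on self.opener/self.join. Both pairs still resolve to .hg in this revision, so behavior is unchanged; the split only creates the seam for a future separate store directory. A minimal sketch of the layout idea (hypothetical and heavily simplified; the real util.opener does much more):

    import os

    def opener(base):
        """Return a function opening paths relative to base
        (a simplified stand-in for Mercurial's util.opener)."""
        def o(path, mode='r'):
            full = os.path.join(base, path)
            if 'w' in mode or 'a' in mode:
                d = os.path.dirname(full)
                if d and not os.path.isdir(d):
                    os.makedirs(d)
            return open(full, mode)
        return o

    class repolayout(object):
        def __init__(self, path):
            self.path = os.path.join(path, ".hg")
            # both openers share a root for now; callers touching store
            # data must go through sopener/sjoin so the store can later
            # move to its own directory without touching them again
            self.opener = opener(self.path)   # dirstate, hgrc, wlock, ...
            self.sopener = opener(self.path)  # changelog, manifest, filelogs, journal, lock
        def join(self, f):
            return os.path.join(self.path, f)
        def sjoin(self, f):
            return os.path.join(self.path, f)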
@@ -1,1819 +1,1824 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.abspath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 self.sopener = util.opener(self.path)
50 51 self.wopener = util.opener(self.root)
51 52
52 53 try:
53 54 self.ui.readconfig(self.join("hgrc"), self.root)
54 55 except IOError:
55 56 pass
56 57
57 58 v = self.ui.configrevlog()
58 59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 61 fl = v.get('flags', None)
61 62 flags = 0
62 63 if fl != None:
63 64 for x in fl.split():
64 65 flags |= revlog.flagstr(x)
65 66 elif self.revlogv1:
66 67 flags = revlog.REVLOG_DEFAULT_FLAGS
67 68
68 69 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
70 self.manifest = manifest.manifest(self.sopener, v)
71 self.changelog = changelog.changelog(self.sopener, v)
71 72
72 73 # the changelog might not have the inline index flag
73 74 # on. If the format of the changelog is the same as found in
74 75 # .hgrc, apply any flags found in the .hgrc as well.
75 76 # Otherwise, just use the version from the changelog
76 77 v = self.changelog.version
77 78 if v == self.revlogversion:
78 79 v |= flags
79 80 self.revlogversion = v
80 81
81 82 self.tagscache = None
82 83 self.branchcache = None
83 84 self.nodetagscache = None
84 85 self.encodepats = None
85 86 self.decodepats = None
86 87 self.transhandle = None
87 88
88 89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 90
90 91 def url(self):
91 92 return 'file:' + self.root
92 93
93 94 def hook(self, name, throw=False, **args):
94 95 def callhook(hname, funcname):
95 96 '''call python hook. hook is callable object, looked up as
96 97 name in python module. if callable returns "true", hook
97 98 fails, else passes. if hook raises exception, treated as
98 99 hook failure. exception propagates if throw is "true".
99 100
100 101 reason for "true" meaning "hook failed" is so that
101 102 unmodified commands (e.g. mercurial.commands.update) can
102 103 be run as hooks without wrappers to convert return values.'''
103 104
104 105 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 106 d = funcname.rfind('.')
106 107 if d == -1:
107 108 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 109 % (hname, funcname))
109 110 modname = funcname[:d]
110 111 try:
111 112 obj = __import__(modname)
112 113 except ImportError:
113 114 try:
114 115 # extensions are loaded with hgext_ prefix
115 116 obj = __import__("hgext_%s" % modname)
116 117 except ImportError:
117 118 raise util.Abort(_('%s hook is invalid '
118 119 '(import of "%s" failed)') %
119 120 (hname, modname))
120 121 try:
121 122 for p in funcname.split('.')[1:]:
122 123 obj = getattr(obj, p)
123 124 except AttributeError, err:
124 125 raise util.Abort(_('%s hook is invalid '
125 126 '("%s" is not defined)') %
126 127 (hname, funcname))
127 128 if not callable(obj):
128 129 raise util.Abort(_('%s hook is invalid '
129 130 '("%s" is not callable)') %
130 131 (hname, funcname))
131 132 try:
132 133 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 134 except (KeyboardInterrupt, util.SignalInterrupt):
134 135 raise
135 136 except Exception, exc:
136 137 if isinstance(exc, util.Abort):
137 138 self.ui.warn(_('error: %s hook failed: %s\n') %
138 139 (hname, exc.args[0]))
139 140 else:
140 141 self.ui.warn(_('error: %s hook raised an exception: '
141 142 '%s\n') % (hname, exc))
142 143 if throw:
143 144 raise
144 145 self.ui.print_exc()
145 146 return True
146 147 if r:
147 148 if throw:
148 149 raise util.Abort(_('%s hook failed') % hname)
149 150 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 151 return r
151 152
152 153 def runhook(name, cmd):
153 154 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 155 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 156 r = util.system(cmd, environ=env, cwd=self.root)
156 157 if r:
157 158 desc, r = util.explain_exit(r)
158 159 if throw:
159 160 raise util.Abort(_('%s hook %s') % (name, desc))
160 161 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 162 return r
162 163
163 164 r = False
164 165 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 166 if hname.split(".", 1)[0] == name and cmd]
166 167 hooks.sort()
167 168 for hname, cmd in hooks:
168 169 if cmd.startswith('python:'):
169 170 r = callhook(hname, cmd[7:].strip()) or r
170 171 else:
171 172 r = runhook(hname, cmd) or r
172 173 return r
173 174
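hook() accepts two kinds of entries from the [hooks] config section: shell commands, run with HG_* environment variables built from the keyword arguments, and python:module.function callables, where a truthy return value means the hook failed. A matching python hook might look like this (module and hook names are hypothetical):

    # importable as e.g. myhooks.py (extensions are imported as hgext_<name>)
    def check_tag(ui, repo, hooktype, **kwargs):
        """A 'pretag' hook: receives node, tag and local as keywords."""
        if kwargs.get('tag', '').startswith('tmp-'):
            ui.warn("refusing to create temporary tag\n")
            return True     # truthy return value = hook failure
        return False        # falsy = success

    # enabled in hgrc:
    #   [hooks]
    #   pretag.checktag = python:myhooks.check_tag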
174 175 tag_disallowed = ':\r\n'
175 176
176 177 def tag(self, name, node, message, local, user, date):
177 178 '''tag a revision with a symbolic name.
178 179
179 180 if local is True, the tag is stored in a per-repository file.
180 181 otherwise, it is stored in the .hgtags file, and a new
181 182 changeset is committed with the change.
182 183
183 184 keyword arguments:
184 185
185 186 local: whether to store tag in non-version-controlled file
186 187 (default False)
187 188
188 189 message: commit message to use if committing
189 190
190 191 user: name of user to use if committing
191 192
192 193 date: date tuple to use if committing'''
193 194
194 195 for c in self.tag_disallowed:
195 196 if c in name:
196 197 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 198
198 199 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 200
200 201 if local:
201 202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 203 self.hook('tag', node=hex(node), tag=name, local=local)
203 204 return
204 205
205 206 for x in self.status()[:5]:
206 207 if '.hgtags' in x:
207 208 raise util.Abort(_('working copy of .hgtags is changed '
208 209 '(please commit .hgtags manually)'))
209 210
210 211 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 212 if self.dirstate.state('.hgtags') == '?':
212 213 self.add(['.hgtags'])
213 214
214 215 self.commit(['.hgtags'], message, user, date)
215 216 self.hook('tag', node=hex(node), tag=name, local=local)
216 217
217 218 def tags(self):
218 219 '''return a mapping of tag to node'''
219 220 if not self.tagscache:
220 221 self.tagscache = {}
221 222
222 223 def parsetag(line, context):
223 224 if not line:
224 225 return
225 226 s = line.split(" ", 1)
226 227 if len(s) != 2:
227 228 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 229 return
229 230 node, key = s
230 231 key = key.strip()
231 232 try:
232 233 bin_n = bin(node)
233 234 except TypeError:
234 235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 236 (context, node))
236 237 return
237 238 if bin_n not in self.changelog.nodemap:
238 239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 240 (context, key))
240 241 return
241 242 self.tagscache[key] = bin_n
242 243
243 244 # read the tags file from each head, ending with the tip,
244 245 # and add each tag found to the map, with "newer" ones
245 246 # taking precedence
246 247 heads = self.heads()
247 248 heads.reverse()
248 249 seen = {}
249 250 for node in heads:
250 251 f = self.filectx('.hgtags', node)
251 252 if not f or f.filerev() in seen: continue
252 253 seen[f.filerev()] = 1
253 254 count = 0
254 255 for l in f.data().splitlines():
255 256 count += 1
256 257 parsetag(l, _("%s, line %d") % (str(f), count))
257 258
258 259 try:
259 260 f = self.opener("localtags")
260 261 count = 0
261 262 for l in f:
262 263 count += 1
263 264 parsetag(l, _("localtags, line %d") % count)
264 265 except IOError:
265 266 pass
266 267
267 268 self.tagscache['tip'] = self.changelog.tip()
268 269
269 270 return self.tagscache
270 271
271 272 def tagslist(self):
272 273 '''return a list of tags ordered by revision'''
273 274 l = []
274 275 for t, n in self.tags().items():
275 276 try:
276 277 r = self.changelog.rev(n)
277 278 except:
278 279 r = -2 # sort to the beginning of the list if unknown
279 280 l.append((r, t, n))
280 281 l.sort()
281 282 return [(t, n) for r, t, n in l]
282 283
283 284 def nodetags(self, node):
284 285 '''return the tags associated with a node'''
285 286 if not self.nodetagscache:
286 287 self.nodetagscache = {}
287 288 for t, n in self.tags().items():
288 289 self.nodetagscache.setdefault(n, []).append(t)
289 290 return self.nodetagscache.get(node, [])
290 291
291 292 def branchtags(self):
292 293 if self.branchcache != None:
293 294 return self.branchcache
294 295
295 296 self.branchcache = {} # avoid recursion in changectx
296 297
297 298 try:
298 299 f = self.opener("branches.cache")
299 300 last, lrev = f.readline().rstrip().split(" ", 1)
300 301 last, lrev = bin(last), int(lrev)
301 302 if (lrev < self.changelog.count() and
302 303 self.changelog.node(lrev) == last): # sanity check
303 304 for l in f:
304 305 node, label = l.rstrip().split(" ", 1)
305 306 self.branchcache[label] = bin(node)
306 307 else: # invalidate the cache
307 308 last, lrev = nullid, -1
308 309 f.close()
309 310 except IOError:
310 311 last, lrev = nullid, -1
311 312
312 313 tip = self.changelog.count() - 1
313 314 if lrev != tip:
314 315 for r in xrange(lrev + 1, tip + 1):
315 316 c = self.changectx(r)
316 317 b = c.branch()
317 318 if b:
318 319 self.branchcache[b] = c.node()
319 320 self._writebranchcache()
320 321
321 322 return self.branchcache
322 323
323 324 def _writebranchcache(self):
324 325 try:
325 326 f = self.opener("branches.cache", "w")
326 327 t = self.changelog.tip()
327 328 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
328 329 for label, node in self.branchcache.iteritems():
329 330 f.write("%s %s\n" % (hex(node), label))
330 331 except IOError:
331 332 pass
332 333
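branchtags() and _writebranchcache() fix the on-disk format of branches.cache: the first line is "<tip hex> <tip rev>", used as a validity check against the changelog, and every following line is "<node hex> <label>". A standalone reader for that format, as a sketch:

    def readbranchcache(path):
        """Parse branches.cache into (tipnode_hex, tiprev, {label: node_hex})."""
        cache = {}
        f = open(path)
        try:
            last, lrev = f.readline().rstrip().split(" ", 1)
            lrev = int(lrev)
            for line in f:
                node, label = line.rstrip().split(" ", 1)
                cache[label] = node
        finally:
            f.close()
        return last, lrev, cache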
333 334 def lookup(self, key):
334 335 if key == '.':
335 336 key = self.dirstate.parents()[0]
336 337 if key == nullid:
337 338 raise repo.RepoError(_("no revision checked out"))
338 339 n = self.changelog._match(key)
339 340 if n:
340 341 return n
341 342 if key in self.tags():
342 343 return self.tags()[key]
343 344 if key in self.branchtags():
344 345 return self.branchtags()[key]
345 346 n = self.changelog._partialmatch(key)
346 347 if n:
347 348 return n
348 349 raise repo.RepoError(_("unknown revision '%s'") % key)
349 350
350 351 def dev(self):
351 352 return os.lstat(self.path).st_dev
352 353
353 354 def local(self):
354 355 return True
355 356
356 357 def join(self, f):
357 358 return os.path.join(self.path, f)
358 359
360 def sjoin(self, f):
361 return os.path.join(self.path, f)
362
359 363 def wjoin(self, f):
360 364 return os.path.join(self.root, f)
361 365
362 366 def file(self, f):
363 367 if f[0] == '/':
364 368 f = f[1:]
365 return filelog.filelog(self.opener, f, self.revlogversion)
369 return filelog.filelog(self.sopener, f, self.revlogversion)
366 370
367 371 def changectx(self, changeid=None):
368 372 return context.changectx(self, changeid)
369 373
370 374 def workingctx(self):
371 375 return context.workingctx(self)
372 376
373 377 def parents(self, changeid=None):
374 378 '''
375 379 get list of changectxs for parents of changeid or working directory
376 380 '''
377 381 if changeid is None:
378 382 pl = self.dirstate.parents()
379 383 else:
380 384 n = self.changelog.lookup(changeid)
381 385 pl = self.changelog.parents(n)
382 386 if pl[1] == nullid:
383 387 return [self.changectx(pl[0])]
384 388 return [self.changectx(pl[0]), self.changectx(pl[1])]
385 389
386 390 def filectx(self, path, changeid=None, fileid=None):
387 391 """changeid can be a changeset revision, node, or tag.
388 392 fileid can be a file revision or node."""
389 393 return context.filectx(self, path, changeid, fileid)
390 394
391 395 def getcwd(self):
392 396 return self.dirstate.getcwd()
393 397
394 398 def wfile(self, f, mode='r'):
395 399 return self.wopener(f, mode)
396 400
397 401 def wread(self, filename):
398 402 if self.encodepats == None:
399 403 l = []
400 404 for pat, cmd in self.ui.configitems("encode"):
401 405 mf = util.matcher(self.root, "", [pat], [], [])[1]
402 406 l.append((mf, cmd))
403 407 self.encodepats = l
404 408
405 409 data = self.wopener(filename, 'r').read()
406 410
407 411 for mf, cmd in self.encodepats:
408 412 if mf(filename):
409 413 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
410 414 data = util.filter(data, cmd)
411 415 break
412 416
413 417 return data
414 418
415 419 def wwrite(self, filename, data, fd=None):
416 420 if self.decodepats == None:
417 421 l = []
418 422 for pat, cmd in self.ui.configitems("decode"):
419 423 mf = util.matcher(self.root, "", [pat], [], [])[1]
420 424 l.append((mf, cmd))
421 425 self.decodepats = l
422 426
423 427 for mf, cmd in self.decodepats:
424 428 if mf(filename):
425 429 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
426 430 data = util.filter(data, cmd)
427 431 break
428 432
429 433 if fd:
430 434 return fd.write(data)
431 435 return self.wopener(filename, 'w').write(data)
432 436
433 437 def transaction(self):
434 438 tr = self.transhandle
435 439 if tr != None and tr.running():
436 440 return tr.nest()
437 441
438 442 # save dirstate for rollback
439 443 try:
440 444 ds = self.opener("dirstate").read()
441 445 except IOError:
442 446 ds = ""
443 447 self.opener("journal.dirstate", "w").write(ds)
444 448
445 tr = transaction.transaction(self.ui.warn, self.opener,
446 self.join("journal"),
449 tr = transaction.transaction(self.ui.warn, self.sopener,
450 self.sjoin("journal"),
447 451 aftertrans(self.path))
448 452 self.transhandle = tr
449 453 return tr
450 454
451 455 def recover(self):
452 456 l = self.lock()
453 if os.path.exists(self.join("journal")):
457 if os.path.exists(self.sjoin("journal")):
454 458 self.ui.status(_("rolling back interrupted transaction\n"))
455 transaction.rollback(self.opener, self.join("journal"))
459 transaction.rollback(self.sopener, self.sjoin("journal"))
456 460 self.reload()
457 461 return True
458 462 else:
459 463 self.ui.warn(_("no interrupted transaction available\n"))
460 464 return False
461 465
462 466 def rollback(self, wlock=None):
463 467 if not wlock:
464 468 wlock = self.wlock()
465 469 l = self.lock()
466 if os.path.exists(self.join("undo")):
470 if os.path.exists(self.sjoin("undo")):
467 471 self.ui.status(_("rolling back last transaction\n"))
468 transaction.rollback(self.opener, self.join("undo"))
472 transaction.rollback(self.sopener, self.sjoin("undo"))
469 473 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
470 474 self.reload()
471 475 self.wreload()
472 476 else:
473 477 self.ui.warn(_("no rollback information available\n"))
474 478
475 479 def wreload(self):
476 480 self.dirstate.read()
477 481
478 482 def reload(self):
479 483 self.changelog.load()
480 484 self.manifest.load()
481 485 self.tagscache = None
482 486 self.nodetagscache = None
483 487
484 488 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
485 489 desc=None):
486 490 try:
487 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
491 l = lock.lock(lockname, 0, releasefn, desc=desc)
488 492 except lock.LockHeld, inst:
489 493 if not wait:
490 494 raise
491 495 self.ui.warn(_("waiting for lock on %s held by %s\n") %
492 496 (desc, inst.args[0]))
493 497 # default to 600 seconds timeout
494 l = lock.lock(self.join(lockname),
495 int(self.ui.config("ui", "timeout") or 600),
498 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
496 499 releasefn, desc=desc)
497 500 if acquirefn:
498 501 acquirefn()
499 502 return l
500 503
501 504 def lock(self, wait=1):
502 return self.do_lock("lock", wait, acquirefn=self.reload,
505 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
503 506 desc=_('repository %s') % self.origroot)
504 507
505 508 def wlock(self, wait=1):
506 return self.do_lock("wlock", wait, self.dirstate.write,
509 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
507 510 self.wreload,
508 511 desc=_('working directory of %s') % self.origroot)
509 512
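Both lock() and wlock() funnel through do_lock(), which retries for ui.timeout seconds (600 by default) when another process holds the lock, so the wait is tunable from configuration, e.g. in hgrc:

    [ui]
    timeout = 600    # seconds to wait for a contended lock before giving up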
510 513 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
511 514 """
512 515 commit an individual file as part of a larger transaction
513 516 """
514 517
515 518 t = self.wread(fn)
516 519 fl = self.file(fn)
517 520 fp1 = manifest1.get(fn, nullid)
518 521 fp2 = manifest2.get(fn, nullid)
519 522
520 523 meta = {}
521 524 cp = self.dirstate.copied(fn)
522 525 if cp:
523 526 meta["copy"] = cp
524 527 if not manifest2: # not a branch merge
525 528 meta["copyrev"] = hex(manifest1.get(cp, nullid))
526 529 fp2 = nullid
527 530 elif fp2 != nullid: # copied on remote side
528 531 meta["copyrev"] = hex(manifest1.get(cp, nullid))
529 532 else: # copied on local side, reversed
530 533 meta["copyrev"] = hex(manifest2.get(cp))
531 534 fp2 = nullid
532 535 self.ui.debug(_(" %s: copy %s:%s\n") %
533 536 (fn, cp, meta["copyrev"]))
534 537 fp1 = nullid
535 538 elif fp2 != nullid:
536 539 # is one parent an ancestor of the other?
537 540 fpa = fl.ancestor(fp1, fp2)
538 541 if fpa == fp1:
539 542 fp1, fp2 = fp2, nullid
540 543 elif fpa == fp2:
541 544 fp2 = nullid
542 545
543 546 # is the file unmodified from the parent? report existing entry
544 547 if fp2 == nullid and not fl.cmp(fp1, t):
545 548 return fp1
546 549
547 550 changelist.append(fn)
548 551 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
549 552
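filecommit() records copies and renames as filelog metadata rather than as merges: a "copy" key carrying the source path and a "copyrev" key carrying the hex filenode of that source, with fp1/fp2 adjusted so the entry does not read as a content merge. For a simple working-directory copy (not a branch merge), the metadata looks roughly like this (values are hypothetical):

    # what filecommit() builds after `hg copy a b` followed by a commit:
    meta = {
        "copy": "a",             # source path, from dirstate.copied()
        "copyrev": "ab" * 20,    # hex filenode of 'a' in manifest1 (example value)
    }
    # fp2 is forced to nullid so the filelog stores copy metadata, not a merge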
550 553 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
551 554 orig_parent = self.dirstate.parents()[0] or nullid
552 555 p1 = p1 or self.dirstate.parents()[0] or nullid
553 556 p2 = p2 or self.dirstate.parents()[1] or nullid
554 557 c1 = self.changelog.read(p1)
555 558 c2 = self.changelog.read(p2)
556 559 m1 = self.manifest.read(c1[0]).copy()
557 560 m2 = self.manifest.read(c2[0])
558 561 changed = []
559 562 removed = []
560 563
561 564 if orig_parent == p1:
562 565 update_dirstate = 1
563 566 else:
564 567 update_dirstate = 0
565 568
566 569 if not wlock:
567 570 wlock = self.wlock()
568 571 l = self.lock()
569 572 tr = self.transaction()
570 573 linkrev = self.changelog.count()
571 574 for f in files:
572 575 try:
573 576 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
574 577 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
575 578 except IOError:
576 579 try:
577 580 del m1[f]
578 581 if update_dirstate:
579 582 self.dirstate.forget([f])
580 583 removed.append(f)
581 584 except:
582 585 # deleted from p2?
583 586 pass
584 587
585 588 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
586 589 user = user or self.ui.username()
587 590 n = self.changelog.add(mnode, changed + removed, text,
588 591 tr, p1, p2, user, date)
589 592 tr.close()
590 593 if update_dirstate:
591 594 self.dirstate.setparents(n, nullid)
592 595
593 596 def commit(self, files=None, text="", user=None, date=None,
594 597 match=util.always, force=False, lock=None, wlock=None,
595 598 force_editor=False):
596 599 commit = []
597 600 remove = []
598 601 changed = []
599 602
600 603 if files:
601 604 for f in files:
602 605 s = self.dirstate.state(f)
603 606 if s in 'nmai':
604 607 commit.append(f)
605 608 elif s == 'r':
606 609 remove.append(f)
607 610 else:
608 611 self.ui.warn(_("%s not tracked!\n") % f)
609 612 else:
610 613 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
611 614 commit = modified + added
612 615 remove = removed
613 616
614 617 p1, p2 = self.dirstate.parents()
615 618 c1 = self.changelog.read(p1)
616 619 c2 = self.changelog.read(p2)
617 620 m1 = self.manifest.read(c1[0]).copy()
618 621 m2 = self.manifest.read(c2[0])
619 622
620 623 branchname = self.workingctx().branch()
621 624 oldname = c1[5].get("branch", "")
622 625
623 626 if not commit and not remove and not force and p2 == nullid and \
624 627 branchname == oldname:
625 628 self.ui.status(_("nothing changed\n"))
626 629 return None
627 630
628 631 xp1 = hex(p1)
629 632 if p2 == nullid: xp2 = ''
630 633 else: xp2 = hex(p2)
631 634
632 635 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
633 636
634 637 if not wlock:
635 638 wlock = self.wlock()
636 639 if not lock:
637 640 lock = self.lock()
638 641 tr = self.transaction()
639 642
640 643 # check in files
641 644 new = {}
642 645 linkrev = self.changelog.count()
643 646 commit.sort()
644 647 for f in commit:
645 648 self.ui.note(f + "\n")
646 649 try:
647 650 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
648 651 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
649 652 except IOError:
650 653 self.ui.warn(_("trouble committing %s!\n") % f)
651 654 raise
652 655
653 656 # update manifest
654 657 m1.update(new)
655 658 for f in remove:
656 659 if f in m1:
657 660 del m1[f]
658 661 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
659 662
660 663 # add changeset
661 664 new = new.keys()
662 665 new.sort()
663 666
664 667 user = user or self.ui.username()
665 668 if not text or force_editor:
666 669 edittext = []
667 670 if text:
668 671 edittext.append(text)
669 672 edittext.append("")
670 673 if p2 != nullid:
671 674 edittext.append("HG: branch merge")
672 675 edittext.extend(["HG: changed %s" % f for f in changed])
673 676 edittext.extend(["HG: removed %s" % f for f in remove])
674 677 if not changed and not remove:
675 678 edittext.append("HG: no files changed")
676 679 edittext.append("")
677 680 # run editor in the repository root
678 681 olddir = os.getcwd()
679 682 os.chdir(self.root)
680 683 text = self.ui.edit("\n".join(edittext), user)
681 684 os.chdir(olddir)
682 685
683 686 lines = [line.rstrip() for line in text.rstrip().splitlines()]
684 687 while lines and not lines[0]:
685 688 del lines[0]
686 689 if not lines:
687 690 return None
688 691 text = '\n'.join(lines)
689 692 extra = {}
690 693 if branchname:
691 694 extra["branch"] = branchname
692 695 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
693 696 user, date, extra)
694 697 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
695 698 parent2=xp2)
696 699 tr.close()
697 700
698 701 self.dirstate.setparents(n)
699 702 self.dirstate.update(new, "n")
700 703 self.dirstate.forget(remove)
701 704
702 705 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
703 706 return n
704 707
705 708 def walk(self, node=None, files=[], match=util.always, badmatch=None):
706 709 if node:
707 710 fdict = dict.fromkeys(files)
708 711 for fn in self.manifest.read(self.changelog.read(node)[0]):
709 712 for ffn in fdict:
710 713 # match if the file is the exact name or a directory
711 714 if ffn == fn or fn.startswith("%s/" % ffn):
712 715 del fdict[ffn]
713 716 break
714 717 if match(fn):
715 718 yield 'm', fn
716 719 for fn in fdict:
717 720 if badmatch and badmatch(fn):
718 721 if match(fn):
719 722 yield 'b', fn
720 723 else:
721 724 self.ui.warn(_('%s: No such file in rev %s\n') % (
722 725 util.pathto(self.getcwd(), fn), short(node)))
723 726 else:
724 727 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
725 728 yield src, fn
726 729
727 730 def status(self, node1=None, node2=None, files=[], match=util.always,
728 731 wlock=None, list_ignored=False, list_clean=False):
729 732 """return status of files between two nodes or node and working directory
730 733
731 734 If node1 is None, use the first dirstate parent instead.
732 735 If node2 is None, compare node1 with working directory.
733 736 """
734 737
735 738 def fcmp(fn, mf):
736 739 t1 = self.wread(fn)
737 740 return self.file(fn).cmp(mf.get(fn, nullid), t1)
738 741
739 742 def mfmatches(node):
740 743 change = self.changelog.read(node)
741 744 mf = self.manifest.read(change[0]).copy()
742 745 for fn in mf.keys():
743 746 if not match(fn):
744 747 del mf[fn]
745 748 return mf
746 749
747 750 modified, added, removed, deleted, unknown = [], [], [], [], []
748 751 ignored, clean = [], []
749 752
750 753 compareworking = False
751 754 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
752 755 compareworking = True
753 756
754 757 if not compareworking:
755 758 # read the manifest from node1 before the manifest from node2,
756 759 # so that we'll hit the manifest cache if we're going through
757 760 # all the revisions in parent->child order.
758 761 mf1 = mfmatches(node1)
759 762
760 763 # are we comparing the working directory?
761 764 if not node2:
762 765 if not wlock:
763 766 try:
764 767 wlock = self.wlock(wait=0)
765 768 except lock.LockException:
766 769 wlock = None
767 770 (lookup, modified, added, removed, deleted, unknown,
768 771 ignored, clean) = self.dirstate.status(files, match,
769 772 list_ignored, list_clean)
770 773
771 774 # are we comparing working dir against its parent?
772 775 if compareworking:
773 776 if lookup:
774 777 # do a full compare of any files that might have changed
775 778 mf2 = mfmatches(self.dirstate.parents()[0])
776 779 for f in lookup:
777 780 if fcmp(f, mf2):
778 781 modified.append(f)
779 782 else:
780 783 clean.append(f)
781 784 if wlock is not None:
782 785 self.dirstate.update([f], "n")
783 786 else:
784 787 # we are comparing working dir against non-parent
785 788 # generate a pseudo-manifest for the working dir
786 789 # XXX: create it in dirstate.py ?
787 790 mf2 = mfmatches(self.dirstate.parents()[0])
788 791 for f in lookup + modified + added:
789 792 mf2[f] = ""
790 793 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
791 794 for f in removed:
792 795 if f in mf2:
793 796 del mf2[f]
794 797 else:
795 798 # we are comparing two revisions
796 799 mf2 = mfmatches(node2)
797 800
798 801 if not compareworking:
799 802 # flush lists from dirstate before comparing manifests
800 803 modified, added, clean = [], [], []
801 804
802 805 # make sure to sort the files so we talk to the disk in a
803 806 # reasonable order
804 807 mf2keys = mf2.keys()
805 808 mf2keys.sort()
806 809 for fn in mf2keys:
807 810 if mf1.has_key(fn):
808 811 if mf1.flags(fn) != mf2.flags(fn) or \
809 812 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
810 813 modified.append(fn)
811 814 elif list_clean:
812 815 clean.append(fn)
813 816 del mf1[fn]
814 817 else:
815 818 added.append(fn)
816 819
817 820 removed = mf1.keys()
818 821
819 822 # sort and return results:
820 823 for l in modified, added, removed, deleted, unknown, ignored, clean:
821 824 l.sort()
822 825 return (modified, added, removed, deleted, unknown, ignored, clean)
823 826
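status() always returns the same seven lists, sorted, in a fixed order; ignored and clean stay empty unless explicitly requested. Callers therefore unpack positionally, e.g. (a sketch, assuming repo is an open localrepository):

    def print_modified(repo):
        modified, added, removed, deleted, unknown, ignored, clean = \
            repo.status(list_ignored=True, list_clean=True)
        for f in modified:
            print("M %s" % f)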
824 827 def add(self, list, wlock=None):
825 828 if not wlock:
826 829 wlock = self.wlock()
827 830 for f in list:
828 831 p = self.wjoin(f)
829 832 if not os.path.exists(p):
830 833 self.ui.warn(_("%s does not exist!\n") % f)
831 834 elif not os.path.isfile(p):
832 835 self.ui.warn(_("%s not added: only files supported currently\n")
833 836 % f)
834 837 elif self.dirstate.state(f) in 'an':
835 838 self.ui.warn(_("%s already tracked!\n") % f)
836 839 else:
837 840 self.dirstate.update([f], "a")
838 841
839 842 def forget(self, list, wlock=None):
840 843 if not wlock:
841 844 wlock = self.wlock()
842 845 for f in list:
843 846 if self.dirstate.state(f) not in 'ai':
844 847 self.ui.warn(_("%s not added!\n") % f)
845 848 else:
846 849 self.dirstate.forget([f])
847 850
848 851 def remove(self, list, unlink=False, wlock=None):
849 852 if unlink:
850 853 for f in list:
851 854 try:
852 855 util.unlink(self.wjoin(f))
853 856 except OSError, inst:
854 857 if inst.errno != errno.ENOENT:
855 858 raise
856 859 if not wlock:
857 860 wlock = self.wlock()
858 861 for f in list:
859 862 p = self.wjoin(f)
860 863 if os.path.exists(p):
861 864 self.ui.warn(_("%s still exists!\n") % f)
862 865 elif self.dirstate.state(f) == 'a':
863 866 self.dirstate.forget([f])
864 867 elif f not in self.dirstate:
865 868 self.ui.warn(_("%s not tracked!\n") % f)
866 869 else:
867 870 self.dirstate.update([f], "r")
868 871
869 872 def undelete(self, list, wlock=None):
870 873 p = self.dirstate.parents()[0]
871 874 mn = self.changelog.read(p)[0]
872 875 m = self.manifest.read(mn)
873 876 if not wlock:
874 877 wlock = self.wlock()
875 878 for f in list:
876 879 if self.dirstate.state(f) not in "r":
877 880 self.ui.warn(_("%s not removed!\n") % f)
878 881 else:
879 882 t = self.file(f).read(m[f])
880 883 self.wwrite(f, t)
881 884 util.set_exec(self.wjoin(f), m.execf(f))
882 885 self.dirstate.update([f], "n")
883 886
884 887 def copy(self, source, dest, wlock=None):
885 888 p = self.wjoin(dest)
886 889 if not os.path.exists(p):
887 890 self.ui.warn(_("%s does not exist!\n") % dest)
888 891 elif not os.path.isfile(p):
889 892 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
890 893 else:
891 894 if not wlock:
892 895 wlock = self.wlock()
893 896 if self.dirstate.state(dest) == '?':
894 897 self.dirstate.update([dest], "a")
895 898 self.dirstate.copy(source, dest)
896 899
897 900 def heads(self, start=None):
898 901 heads = self.changelog.heads(start)
899 902 # sort the output in rev descending order
900 903 heads = [(-self.changelog.rev(h), h) for h in heads]
901 904 heads.sort()
902 905 return [n for (r, n) in heads]
903 906
904 907 # branchlookup returns a dict giving a list of branches for
905 908 # each head. A branch is defined as the tag of a node or
906 909 # the branch of the node's parents. If a node has multiple
907 910 # branch tags, tags are eliminated if they are visible from other
908 911 # branch tags.
909 912 #
910 913 # So, for this graph: a->b->c->d->e
911 914 # \ /
912 915 # aa -----/
913 916 # a has tag 2.6.12
914 917 # d has tag 2.6.13
915 918 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
916 919 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
917 920 # from the list.
918 921 #
919 922 # It is possible that more than one head will have the same branch tag.
920 923 # callers need to check the result for multiple heads under the same
921 924 # branch tag if that is a problem for them (ie checkout of a specific
922 925 # branch).
923 926 #
924 927 # passing in a specific branch will limit the depth of the search
925 928 # through the parents. It won't limit the branches returned in the
926 929 # result though.
927 930 def branchlookup(self, heads=None, branch=None):
928 931 if not heads:
929 932 heads = self.heads()
930 933 headt = [ h for h in heads ]
931 934 chlog = self.changelog
932 935 branches = {}
933 936 merges = []
934 937 seenmerge = {}
935 938
936 939 # traverse the tree once for each head, recording in the branches
937 940 # dict which tags are visible from this head. The branches
938 941 # dict also records which tags are visible from each tag
939 942 # while we traverse.
940 943 while headt or merges:
941 944 if merges:
942 945 n, found = merges.pop()
943 946 visit = [n]
944 947 else:
945 948 h = headt.pop()
946 949 visit = [h]
947 950 found = [h]
948 951 seen = {}
949 952 while visit:
950 953 n = visit.pop()
951 954 if n in seen:
952 955 continue
953 956 pp = chlog.parents(n)
954 957 tags = self.nodetags(n)
955 958 if tags:
956 959 for x in tags:
957 960 if x == 'tip':
958 961 continue
959 962 for f in found:
960 963 branches.setdefault(f, {})[n] = 1
961 964 branches.setdefault(n, {})[n] = 1
962 965 break
963 966 if n not in found:
964 967 found.append(n)
965 968 if branch in tags:
966 969 continue
967 970 seen[n] = 1
968 971 if pp[1] != nullid and n not in seenmerge:
969 972 merges.append((pp[1], [x for x in found]))
970 973 seenmerge[n] = 1
971 974 if pp[0] != nullid:
972 975 visit.append(pp[0])
973 976 # traverse the branches dict, eliminating branch tags from each
974 977 # head that are visible from another branch tag for that head.
975 978 out = {}
976 979 viscache = {}
977 980 for h in heads:
978 981 def visible(node):
979 982 if node in viscache:
980 983 return viscache[node]
981 984 ret = {}
982 985 visit = [node]
983 986 while visit:
984 987 x = visit.pop()
985 988 if x in viscache:
986 989 ret.update(viscache[x])
987 990 elif x not in ret:
988 991 ret[x] = 1
989 992 if x in branches:
990 993 visit[len(visit):] = branches[x].keys()
991 994 viscache[node] = ret
992 995 return ret
993 996 if h not in branches:
994 997 continue
995 998 # O(n^2), but somewhat limited. This only searches the
996 999 # tags visible from a specific head, not all the tags in the
997 1000 # whole repo.
998 1001 for b in branches[h]:
999 1002 vis = False
1000 1003 for bb in branches[h].keys():
1001 1004 if b != bb:
1002 1005 if b in visible(bb):
1003 1006 vis = True
1004 1007 break
1005 1008 if not vis:
1006 1009 l = out.setdefault(h, [])
1007 1010 l[len(l):] = self.nodetags(b)
1008 1011 return out
1009 1012
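After the elimination pass, branchlookup() returns a dict mapping each head node to the branch tags that survived; as the comment above notes, callers must still check for multiple heads sharing a tag. Consuming the result (a sketch, assuming repo is an open localrepository):

    from binascii import hexlify

    def show_branch_heads(repo):
        # {headnode: [tagname, ...]}, one entry per head with branch tags
        for head, tags in repo.branchlookup().items():
            print("%s: %s" % (hexlify(head)[:12], ", ".join(tags)))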
1010 1013 def branches(self, nodes):
1011 1014 if not nodes:
1012 1015 nodes = [self.changelog.tip()]
1013 1016 b = []
1014 1017 for n in nodes:
1015 1018 t = n
1016 1019 while 1:
1017 1020 p = self.changelog.parents(n)
1018 1021 if p[1] != nullid or p[0] == nullid:
1019 1022 b.append((t, n, p[0], p[1]))
1020 1023 break
1021 1024 n = p[0]
1022 1025 return b
1023 1026
1024 1027 def between(self, pairs):
1025 1028 r = []
1026 1029
1027 1030 for top, bottom in pairs:
1028 1031 n, l, i = top, [], 0
1029 1032 f = 1
1030 1033
1031 1034 while n != bottom:
1032 1035 p = self.changelog.parents(n)[0]
1033 1036 if i == f:
1034 1037 l.append(n)
1035 1038 f = f * 2
1036 1039 n = p
1037 1040 i += 1
1038 1041
1039 1042 r.append(l)
1040 1043
1041 1044 return r
1042 1045
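between() walks first parents from top toward bottom and records only the nodes at distances 1, 2, 4, 8, ... from top (append when i == f, then double f), giving the discovery code logarithmically many probe points for its binary search. The sampling pattern in isolation, with step counts standing in for nodes (a sketch):

    def sample_distances(depth):
        """Distances from top that between() records on a chain
        `depth` first-parent steps long."""
        l, i, f = [], 0, 1
        n = 0                 # current distance from top
        while n != depth:     # mirrors `while n != bottom`
            if i == f:
                l.append(n)   # record this node...
                f *= 2        # ...then double the stride
            n += 1            # mirrors `n = p` (step to first parent)
            i += 1
        return l

    print(sample_distances(10))   # -> [1, 2, 4, 8]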
1043 1046 def findincoming(self, remote, base=None, heads=None, force=False):
1044 1047 """Return list of roots of the subsets of missing nodes from remote
1045 1048
1046 1049 If base dict is specified, assume that these nodes and their parents
1047 1050 exist on the remote side and that no child of a node of base exists
1048 1051 in both remote and self.
1049 1052 Furthermore base will be updated to include the nodes that exist in
1050 1053 both self and remote but whose children do not exist in both.
1051 1054 If a list of heads is specified, return only nodes which are heads
1052 1055 or ancestors of these heads.
1053 1056
1054 1057 All the ancestors of base are in self and in remote.
1055 1058 All the descendants of the list returned are missing in self.
1056 1059 (and so we know that the rest of the nodes are missing in remote, see
1057 1060 outgoing)
1058 1061 """
1059 1062 m = self.changelog.nodemap
1060 1063 search = []
1061 1064 fetch = {}
1062 1065 seen = {}
1063 1066 seenbranch = {}
1064 1067 if base == None:
1065 1068 base = {}
1066 1069
1067 1070 if not heads:
1068 1071 heads = remote.heads()
1069 1072
1070 1073 if self.changelog.tip() == nullid:
1071 1074 base[nullid] = 1
1072 1075 if heads != [nullid]:
1073 1076 return [nullid]
1074 1077 return []
1075 1078
1076 1079 # assume we're closer to the tip than the root
1077 1080 # and start by examining the heads
1078 1081 self.ui.status(_("searching for changes\n"))
1079 1082
1080 1083 unknown = []
1081 1084 for h in heads:
1082 1085 if h not in m:
1083 1086 unknown.append(h)
1084 1087 else:
1085 1088 base[h] = 1
1086 1089
1087 1090 if not unknown:
1088 1091 return []
1089 1092
1090 1093 req = dict.fromkeys(unknown)
1091 1094 reqcnt = 0
1092 1095
1093 1096 # search through remote branches
1094 1097 # a 'branch' here is a linear segment of history, with four parts:
1095 1098 # head, root, first parent, second parent
1096 1099 # (a branch always has two parents (or none) by definition)
1097 1100 unknown = remote.branches(unknown)
1098 1101 while unknown:
1099 1102 r = []
1100 1103 while unknown:
1101 1104 n = unknown.pop(0)
1102 1105 if n[0] in seen:
1103 1106 continue
1104 1107
1105 1108 self.ui.debug(_("examining %s:%s\n")
1106 1109 % (short(n[0]), short(n[1])))
1107 1110 if n[0] == nullid: # found the end of the branch
1108 1111 pass
1109 1112 elif n in seenbranch:
1110 1113 self.ui.debug(_("branch already found\n"))
1111 1114 continue
1112 1115 elif n[1] and n[1] in m: # do we know the base?
1113 1116 self.ui.debug(_("found incomplete branch %s:%s\n")
1114 1117 % (short(n[0]), short(n[1])))
1115 1118 search.append(n) # schedule branch range for scanning
1116 1119 seenbranch[n] = 1
1117 1120 else:
1118 1121 if n[1] not in seen and n[1] not in fetch:
1119 1122 if n[2] in m and n[3] in m:
1120 1123 self.ui.debug(_("found new changeset %s\n") %
1121 1124 short(n[1]))
1122 1125 fetch[n[1]] = 1 # earliest unknown
1123 1126 for p in n[2:4]:
1124 1127 if p in m:
1125 1128 base[p] = 1 # latest known
1126 1129
1127 1130 for p in n[2:4]:
1128 1131 if p not in req and p not in m:
1129 1132 r.append(p)
1130 1133 req[p] = 1
1131 1134 seen[n[0]] = 1
1132 1135
1133 1136 if r:
1134 1137 reqcnt += 1
1135 1138 self.ui.debug(_("request %d: %s\n") %
1136 1139 (reqcnt, " ".join(map(short, r))))
1137 1140 for p in range(0, len(r), 10):
1138 1141 for b in remote.branches(r[p:p+10]):
1139 1142 self.ui.debug(_("received %s:%s\n") %
1140 1143 (short(b[0]), short(b[1])))
1141 1144 unknown.append(b)
1142 1145
1143 1146 # do binary search on the branches we found
1144 1147 while search:
1145 1148 n = search.pop(0)
1146 1149 reqcnt += 1
1147 1150 l = remote.between([(n[0], n[1])])[0]
1148 1151 l.append(n[1])
1149 1152 p = n[0]
1150 1153 f = 1
1151 1154 for i in l:
1152 1155 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1153 1156 if i in m:
1154 1157 if f <= 2:
1155 1158 self.ui.debug(_("found new branch changeset %s\n") %
1156 1159 short(p))
1157 1160 fetch[p] = 1
1158 1161 base[i] = 1
1159 1162 else:
1160 1163 self.ui.debug(_("narrowed branch search to %s:%s\n")
1161 1164 % (short(p), short(i)))
1162 1165 search.append((p, i))
1163 1166 break
1164 1167 p, f = i, f * 2
1165 1168
1166 1169 # sanity check our fetch list
1167 1170 for f in fetch.keys():
1168 1171 if f in m:
1169 1172 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1170 1173
1171 1174 if base.keys() == [nullid]:
1172 1175 if force:
1173 1176 self.ui.warn(_("warning: repository is unrelated\n"))
1174 1177 else:
1175 1178 raise util.Abort(_("repository is unrelated"))
1176 1179
1177 1180 self.ui.debug(_("found new changesets starting at ") +
1178 1181 " ".join([short(f) for f in fetch]) + "\n")
1179 1182
1180 1183 self.ui.debug(_("%d total queries\n") % reqcnt)
1181 1184
1182 1185 return fetch.keys()
1183 1186
1184 1187 def findoutgoing(self, remote, base=None, heads=None, force=False):
1185 1188 """Return list of nodes that are roots of subsets not in remote
1186 1189
1187 1190 If base dict is specified, assume that these nodes and their parents
1188 1191 exist on the remote side.
1189 1192 If a list of heads is specified, return only nodes which are heads
1190 1193 or ancestors of these heads, and return a second element which
1191 1194 contains all remote heads which get new children.
1192 1195 """
1193 1196 if base == None:
1194 1197 base = {}
1195 1198 self.findincoming(remote, base, heads, force=force)
1196 1199
1197 1200 self.ui.debug(_("common changesets up to ")
1198 1201 + " ".join(map(short, base.keys())) + "\n")
1199 1202
1200 1203 remain = dict.fromkeys(self.changelog.nodemap)
1201 1204
1202 1205 # prune everything remote has from the tree
1203 1206 del remain[nullid]
1204 1207 remove = base.keys()
1205 1208 while remove:
1206 1209 n = remove.pop(0)
1207 1210 if n in remain:
1208 1211 del remain[n]
1209 1212 for p in self.changelog.parents(n):
1210 1213 remove.append(p)
1211 1214
1212 1215 # find every node whose parents have been pruned
1213 1216 subset = []
1214 1217 # find every remote head that will get new children
1215 1218 updated_heads = {}
1216 1219 for n in remain:
1217 1220 p1, p2 = self.changelog.parents(n)
1218 1221 if p1 not in remain and p2 not in remain:
1219 1222 subset.append(n)
1220 1223 if heads:
1221 1224 if p1 in heads:
1222 1225 updated_heads[p1] = True
1223 1226 if p2 in heads:
1224 1227 updated_heads[p2] = True
1225 1228
1226 1229 # this is the set of all roots we have to push
1227 1230 if heads:
1228 1231 return subset, updated_heads.keys()
1229 1232 else:
1230 1233 return subset
1231 1234
1232 1235 def pull(self, remote, heads=None, force=False, lock=None):
1233 1236 mylock = False
1234 1237 if not lock:
1235 1238 lock = self.lock()
1236 1239 mylock = True
1237 1240
1238 1241 try:
1239 1242 fetch = self.findincoming(remote, force=force)
1240 1243 if fetch == [nullid]:
1241 1244 self.ui.status(_("requesting all changes\n"))
1242 1245
1243 1246 if not fetch:
1244 1247 self.ui.status(_("no changes found\n"))
1245 1248 return 0
1246 1249
1247 1250 if heads is None:
1248 1251 cg = remote.changegroup(fetch, 'pull')
1249 1252 else:
1250 1253 if 'changegroupsubset' not in remote.capabilities:
1251 1254 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1252 1255 cg = remote.changegroupsubset(fetch, heads, 'pull')
1253 1256 return self.addchangegroup(cg, 'pull', remote.url())
1254 1257 finally:
1255 1258 if mylock:
1256 1259 lock.release()
1257 1260
1258 1261 def push(self, remote, force=False, revs=None):
1259 1262 # there are two ways to push to remote repo:
1260 1263 #
1261 1264 # addchangegroup assumes local user can lock remote
1262 1265 # repo (local filesystem, old ssh servers).
1263 1266 #
1264 1267 # unbundle assumes local user cannot lock remote repo (new ssh
1265 1268 # servers, http servers).
1266 1269
1267 1270 if remote.capable('unbundle'):
1268 1271 return self.push_unbundle(remote, force, revs)
1269 1272 return self.push_addchangegroup(remote, force, revs)
1270 1273
1271 1274 def prepush(self, remote, force, revs):
1272 1275 base = {}
1273 1276 remote_heads = remote.heads()
1274 1277 inc = self.findincoming(remote, base, remote_heads, force=force)
1275 1278 if not force and inc:
1276 1279 self.ui.warn(_("abort: unsynced remote changes!\n"))
1277 1280 self.ui.status(_("(did you forget to sync?"
1278 1281 " use push -f to force)\n"))
1279 1282 return None, 1
1280 1283
1281 1284 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1282 1285 if revs is not None:
1283 1286 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1284 1287 else:
1285 1288 bases, heads = update, self.changelog.heads()
1286 1289
1287 1290 if not bases:
1288 1291 self.ui.status(_("no changes found\n"))
1289 1292 return None, 1
1290 1293 elif not force:
1291 1294 # FIXME we don't properly detect creation of new heads
1292 1295 # in the push -r case, assume the user knows what he's doing
1293 1296 if not revs and len(remote_heads) < len(heads) \
1294 1297 and remote_heads != [nullid]:
1295 1298 self.ui.warn(_("abort: push creates new remote branches!\n"))
1296 1299 self.ui.status(_("(did you forget to merge?"
1297 1300 " use push -f to force)\n"))
1298 1301 return None, 1
1299 1302
1300 1303 if revs is None:
1301 1304 cg = self.changegroup(update, 'push')
1302 1305 else:
1303 1306 cg = self.changegroupsubset(update, revs, 'push')
1304 1307 return cg, remote_heads
1305 1308
1306 1309 def push_addchangegroup(self, remote, force, revs):
1307 1310 lock = remote.lock()
1308 1311
1309 1312 ret = self.prepush(remote, force, revs)
1310 1313 if ret[0] is not None:
1311 1314 cg, remote_heads = ret
1312 1315 return remote.addchangegroup(cg, 'push', self.url())
1313 1316 return ret[1]
1314 1317
1315 1318 def push_unbundle(self, remote, force, revs):
1316 1319 # local repo finds heads on server, finds out what revs it
1317 1320 # must push. once revs transferred, if server finds it has
1318 1321 # different heads (someone else won commit/push race), server
1319 1322 # aborts.
1320 1323
1321 1324 ret = self.prepush(remote, force, revs)
1322 1325 if ret[0] is not None:
1323 1326 cg, remote_heads = ret
1324 1327 if force: remote_heads = ['force']
1325 1328 return remote.unbundle(cg, remote_heads, 'push')
1326 1329 return ret[1]
1327 1330
1328 1331 def changegroupsubset(self, bases, heads, source):
1329 1332 """This function generates a changegroup consisting of all the nodes
1330 1333 that are descendants of any of the bases, and ancestors of any of
1331 1334 the heads.
1332 1335
1333 1336 It is fairly complex as determining which filenodes and which
1334 1337 manifest nodes need to be included for the changeset to be complete
1335 1338 is non-trivial.
1336 1339
1337 1340 Another wrinkle is doing the reverse, figuring out which changeset in
1338 1341 the changegroup a particular filenode or manifestnode belongs to."""
1339 1342
1340 1343 self.hook('preoutgoing', throw=True, source=source)
1341 1344
1342 1345 # Set up some initial variables
1343 1346 # Make it easy to refer to self.changelog
1344 1347 cl = self.changelog
1345 1348 # msng is short for missing - compute the list of changesets in this
1346 1349 # changegroup.
1347 1350 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1348 1351 # Some bases may turn out to be superfluous, and some heads may be
1349 1352 # too. nodesbetween will return the minimal set of bases and heads
1350 1353 # necessary to re-create the changegroup.
1351 1354
1352 1355 # Known heads are the list of heads that it is assumed the recipient
1353 1356 # of this changegroup will know about.
1354 1357 knownheads = {}
1355 1358 # We assume that all parents of bases are known heads.
1356 1359 for n in bases:
1357 1360 for p in cl.parents(n):
1358 1361 if p != nullid:
1359 1362 knownheads[p] = 1
1360 1363 knownheads = knownheads.keys()
1361 1364 if knownheads:
1362 1365 # Now that we know what heads are known, we can compute which
1363 1366 # changesets are known. The recipient must know about all
1364 1367 # changesets required to reach the known heads from the null
1365 1368 # changeset.
1366 1369 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1367 1370 junk = None
1368 1371 # Transform the list into an ersatz set.
1369 1372 has_cl_set = dict.fromkeys(has_cl_set)
1370 1373 else:
1371 1374 # If there were no known heads, the recipient cannot be assumed to
1372 1375 # know about any changesets.
1373 1376 has_cl_set = {}
1374 1377
1375 1378 # Make it easy to refer to self.manifest
1376 1379 mnfst = self.manifest
1377 1380 # We don't know which manifests are missing yet
1378 1381 msng_mnfst_set = {}
1379 1382 # Nor do we know which filenodes are missing.
1380 1383 msng_filenode_set = {}
1381 1384
1382 1385 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1383 1386 junk = None
1384 1387
1385 1388 # A changeset always belongs to itself, so the changenode lookup
1386 1389 # function for a changenode is identity.
1387 1390 def identity(x):
1388 1391 return x
1389 1392
1390 1393 # A function generating function. Sets up an environment for the
1391 1394 # inner function.
1392 1395 def cmp_by_rev_func(revlog):
1393 1396 # Compare two nodes by their revision number in the environment's
1394 1397 # revision history. Since the revision number both represents the
1395 1398 # most efficient order to read the nodes in, and represents a
1396 1399 # topological sorting of the nodes, this function is often useful.
1397 1400 def cmp_by_rev(a, b):
1398 1401 return cmp(revlog.rev(a), revlog.rev(b))
1399 1402 return cmp_by_rev
1400 1403
1401 1404 # If we determine that a particular file or manifest node must be a
1402 1405 # node that the recipient of the changegroup will already have, we can
1403 1406 # also assume the recipient will have all the parents. This function
1404 1407 # prunes them from the set of missing nodes.
1405 1408 def prune_parents(revlog, hasset, msngset):
1406 1409 haslst = hasset.keys()
1407 1410 haslst.sort(cmp_by_rev_func(revlog))
1408 1411 for node in haslst:
1409 1412 parentlst = [p for p in revlog.parents(node) if p != nullid]
1410 1413 while parentlst:
1411 1414 n = parentlst.pop()
1412 1415 if n not in hasset:
1413 1416 hasset[n] = 1
1414 1417 p = [p for p in revlog.parents(n) if p != nullid]
1415 1418 parentlst.extend(p)
1416 1419 for n in hasset:
1417 1420 msngset.pop(n, None)
1418 1421
1419 1422 # This is a function generating function used to set up an environment
1420 1423 # for the inner function to execute in.
1421 1424 def manifest_and_file_collector(changedfileset):
1422 1425 # This is an information gathering function that gathers
1423 1426 # information from each changeset node that goes out as part of
1424 1427 # the changegroup. The information gathered is a list of which
1425 1428 # manifest nodes are potentially required (the recipient may
1426 1429 # already have them) and total list of all files which were
1427 1430 # changed in any changeset in the changegroup.
1428 1431 #
1429 1432 # We also remember the first changenode we saw any manifest
1430 1433 # referenced by so we can later determine which changenode 'owns'
1431 1434 # the manifest.
1432 1435 def collect_manifests_and_files(clnode):
1433 1436 c = cl.read(clnode)
1434 1437 for f in c[3]:
1435 1438 # This is to make sure we only have one instance of each
1436 1439 # filename string for each filename.
1437 1440 changedfileset.setdefault(f, f)
1438 1441 msng_mnfst_set.setdefault(c[0], clnode)
1439 1442 return collect_manifests_and_files
1440 1443
1441 1444 # Figure out which manifest nodes (of the ones we think might be part
1442 1445 # of the changegroup) the recipient must know about and remove them
1443 1446 # from the changegroup.
1444 1447 def prune_manifests():
1445 1448 has_mnfst_set = {}
1446 1449 for n in msng_mnfst_set:
1447 1450 # If a 'missing' manifest thinks it belongs to a changenode
1448 1451 # the recipient is assumed to have, obviously the recipient
1449 1452 # must have that manifest.
1450 1453 linknode = cl.node(mnfst.linkrev(n))
1451 1454 if linknode in has_cl_set:
1452 1455 has_mnfst_set[n] = 1
1453 1456 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1454 1457
1455 1458 # Use the information collected in collect_manifests_and_files to say
1456 1459 # which changenode any manifestnode belongs to.
1457 1460 def lookup_manifest_link(mnfstnode):
1458 1461 return msng_mnfst_set[mnfstnode]
1459 1462
1460 1463 # A function generating function that sets up the initial environment
1461 1464 # for the inner function.
1462 1465 def filenode_collector(changedfiles):
1463 1466 next_rev = [0]
1464 1467 # This gathers information from each manifestnode included in the
1465 1468 # changegroup about which filenodes the manifest node references
1466 1469 # so we can include those in the changegroup too.
1467 1470 #
1468 1471 # It also remembers which changenode each filenode belongs to. It
1469 1472 # does this by assuming that a filenode belongs to the changenode
1470 1473 # the first manifest that references it belongs to.
1471 1474 def collect_msng_filenodes(mnfstnode):
1472 1475 r = mnfst.rev(mnfstnode)
1473 1476 if r == next_rev[0]:
1474 1477 # If the last rev we looked at was the one just previous,
1475 1478 # we only need to see a diff.
1476 1479 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1477 1480 # For each line in the delta
1478 1481 for dline in delta.splitlines():
1479 1482 # get the filename and filenode for that line
1480 1483 f, fnode = dline.split('\0')
1481 1484 fnode = bin(fnode[:40])
1482 1485 f = changedfiles.get(f, None)
1483 1486 # And if the file is in the list of files we care
1484 1487 # about.
1485 1488 if f is not None:
1486 1489 # Get the changenode this manifest belongs to
1487 1490 clnode = msng_mnfst_set[mnfstnode]
1488 1491 # Create the set of filenodes for the file if
1489 1492 # there isn't one already.
1490 1493 ndset = msng_filenode_set.setdefault(f, {})
1491 1494 # And set the filenode's changelog node to the
1492 1495 # manifest's if it hasn't been set already.
1493 1496 ndset.setdefault(fnode, clnode)
1494 1497 else:
1495 1498 # Otherwise we need a full manifest.
1496 1499 m = mnfst.read(mnfstnode)
1497 1500 # For every file we care about.
1498 1501 for f in changedfiles:
1499 1502 fnode = m.get(f, None)
1500 1503 # If it's in the manifest
1501 1504 if fnode is not None:
1502 1505 # See comments above.
1503 1506 clnode = msng_mnfst_set[mnfstnode]
1504 1507 ndset = msng_filenode_set.setdefault(f, {})
1505 1508 ndset.setdefault(fnode, clnode)
1506 1509 # Remember the revision we hope to see next.
1507 1510 next_rev[0] = r + 1
1508 1511 return collect_msng_filenodes
1509 1512
1510 1513 # We have a list of filenodes we think we need for a file, let's remove
1511 1514 # all those we know the recipient must have.
1512 1515 def prune_filenodes(f, filerevlog):
1513 1516 msngset = msng_filenode_set[f]
1514 1517 hasset = {}
1515 1518 # If a 'missing' filenode thinks it belongs to a changenode we
1516 1519 # assume the recipient must have, then the recipient must have
1517 1520 # that filenode.
1518 1521 for n in msngset:
1519 1522 clnode = cl.node(filerevlog.linkrev(n))
1520 1523 if clnode in has_cl_set:
1521 1524 hasset[n] = 1
1522 1525 prune_parents(filerevlog, hasset, msngset)
1523 1526
1524 1527 # A function generating function that sets up a context for the
1525 1528 # inner function.
1526 1529 def lookup_filenode_link_func(fname):
1527 1530 msngset = msng_filenode_set[fname]
1528 1531 # Lookup the changenode the filenode belongs to.
1529 1532 def lookup_filenode_link(fnode):
1530 1533 return msngset[fnode]
1531 1534 return lookup_filenode_link
1532 1535
1533 1536 # Now that we have all these utility functions to help out and
1534 1537 # logically divide up the task, generate the group.
1535 1538 def gengroup():
1536 1539 # The set of changed files starts empty.
1537 1540 changedfiles = {}
1538 1541 # Create a changenode group generator that will call our functions
1539 1542 # back to lookup the owning changenode and collect information.
1540 1543 group = cl.group(msng_cl_lst, identity,
1541 1544 manifest_and_file_collector(changedfiles))
1542 1545 for chnk in group:
1543 1546 yield chnk
1544 1547
1545 1548 # The list of manifests has been collected by the generator
1546 1549 # calling our functions back.
1547 1550 prune_manifests()
1548 1551 msng_mnfst_lst = msng_mnfst_set.keys()
1549 1552 # Sort the manifestnodes by revision number.
1550 1553 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1551 1554 # Create a generator for the manifestnodes that calls our lookup
1552 1555 # and data collection functions back.
1553 1556 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1554 1557 filenode_collector(changedfiles))
1555 1558 for chnk in group:
1556 1559 yield chnk
1557 1560
1558 1561 # These are no longer needed, dereference and toss the memory for
1559 1562 # them.
1560 1563 msng_mnfst_lst = None
1561 1564 msng_mnfst_set.clear()
1562 1565
1563 1566 changedfiles = changedfiles.keys()
1564 1567 changedfiles.sort()
1565 1568 # Go through all our files in order sorted by name.
1566 1569 for fname in changedfiles:
1567 1570 filerevlog = self.file(fname)
1568 1571 # Toss out the filenodes that the recipient isn't really
1569 1572 # missing.
1570 1573 if msng_filenode_set.has_key(fname):
1571 1574 prune_filenodes(fname, filerevlog)
1572 1575 msng_filenode_lst = msng_filenode_set[fname].keys()
1573 1576 else:
1574 1577 msng_filenode_lst = []
1575 1578 # If any filenodes are left, generate the group for them,
1576 1579 # otherwise don't bother.
1577 1580 if len(msng_filenode_lst) > 0:
1578 1581 yield changegroup.genchunk(fname)
1579 1582 # Sort the filenodes by their revision #
1580 1583 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1581 1584 # Create a group generator and only pass in a changenode
1582 1585 # lookup function as we need to collect no information
1583 1586 # from filenodes.
1584 1587 group = filerevlog.group(msng_filenode_lst,
1585 1588 lookup_filenode_link_func(fname))
1586 1589 for chnk in group:
1587 1590 yield chnk
1588 1591 if msng_filenode_set.has_key(fname):
1589 1592 # Don't need this anymore, toss it to free memory.
1590 1593 del msng_filenode_set[fname]
1591 1594 # Signal that no more groups are left.
1592 1595 yield changegroup.closechunk()
1593 1596
1594 1597 if msng_cl_lst:
1595 1598 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1596 1599
1597 1600 return util.chunkbuffer(gengroup())
1598 1601
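A note on the stream these generators produce: everything above is flattened into length-prefixed chunks, where changegroup.genchunk frames a payload and changegroup.closechunk emits an empty chunk that ends a group. A minimal sketch of that framing, assuming the 4-byte big-endian length prefix counts itself:

    import struct

    def genchunk(data):
        # Frame a payload: a 4-byte big-endian length that counts
        # itself, followed by the payload bytes.
        return struct.pack(">l", len(data) + 4) + data

    def closechunk():
        # An empty chunk (length field only) ends the current group.
        return struct.pack(">l", 0)

    # One group holding two payloads, then the group terminator.
    stream = genchunk(b"abc") + genchunk(b"defgh") + closechunk()

util.chunkbuffer then just lets callers read this generated stream in arbitrary-sized pieces.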
1599 1602 def changegroup(self, basenodes, source):
1600 1603 """Generate a changegroup of all nodes that we have that a recipient
1601 1604 doesn't.
1602 1605
1603 1606 This is much easier than the previous function as we can assume that
1604 1607 the recipient has any changenode we aren't sending them."""
1605 1608
1606 1609 self.hook('preoutgoing', throw=True, source=source)
1607 1610
1608 1611 cl = self.changelog
1609 1612 nodes = cl.nodesbetween(basenodes, None)[0]
1610 1613 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1611 1614
1612 1615 def identity(x):
1613 1616 return x
1614 1617
1615 1618 def gennodelst(revlog):
1616 1619 for r in xrange(0, revlog.count()):
1617 1620 n = revlog.node(r)
1618 1621 if revlog.linkrev(n) in revset:
1619 1622 yield n
1620 1623
1621 1624 def changed_file_collector(changedfileset):
1622 1625 def collect_changed_files(clnode):
1623 1626 c = cl.read(clnode)
1624 1627 for fname in c[3]:
1625 1628 changedfileset[fname] = 1
1626 1629 return collect_changed_files
1627 1630
1628 1631 def lookuprevlink_func(revlog):
1629 1632 def lookuprevlink(n):
1630 1633 return cl.node(revlog.linkrev(n))
1631 1634 return lookuprevlink
1632 1635
1633 1636 def gengroup():
1634 1637 # construct a list of all changed files
1635 1638 changedfiles = {}
1636 1639
1637 1640 for chnk in cl.group(nodes, identity,
1638 1641 changed_file_collector(changedfiles)):
1639 1642 yield chnk
1640 1643 changedfiles = changedfiles.keys()
1641 1644 changedfiles.sort()
1642 1645
1643 1646 mnfst = self.manifest
1644 1647 nodeiter = gennodelst(mnfst)
1645 1648 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1646 1649 yield chnk
1647 1650
1648 1651 for fname in changedfiles:
1649 1652 filerevlog = self.file(fname)
1650 1653 nodeiter = gennodelst(filerevlog)
1651 1654 nodeiter = list(nodeiter)
1652 1655 if nodeiter:
1653 1656 yield changegroup.genchunk(fname)
1654 1657 lookup = lookuprevlink_func(filerevlog)
1655 1658 for chnk in filerevlog.group(nodeiter, lookup):
1656 1659 yield chnk
1657 1660
1658 1661 yield changegroup.closechunk()
1659 1662
1660 1663 if nodes:
1661 1664 self.hook('outgoing', node=hex(nodes[0]), source=source)
1662 1665
1663 1666 return util.chunkbuffer(gengroup())
1664 1667
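gennodelst is the heart of this simpler path: every manifest and filelog revision carries a linkrev back to the changeset that introduced it, so deciding what to send is a per-revlog membership test against the outgoing revision set. A toy restatement with hypothetical stand-in data, not the Mercurial API:

    # Hypothetical stand-in data: node -> linkrev for one revlog,
    # plus the set of outgoing changelog revisions.
    linkrevs = {"n0": 0, "n1": 2, "n2": 5}
    revset = dict.fromkeys([0, 5])

    def gennodelst(linkrevs, revset):
        # Yield, in revision order, only the nodes whose
        # introducing changeset is in the outgoing set.
        for node, lr in sorted(linkrevs.items(), key=lambda kv: kv[1]):
            if lr in revset:
                yield node

    print(list(gennodelst(linkrevs, revset)))  # ['n0', 'n2']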
1665 1668 def addchangegroup(self, source, srctype, url):
1666 1669 """add changegroup to repo.
1667 1670 returns number of heads modified or added + 1."""
1668 1671
1669 1672 def csmap(x):
1670 1673 self.ui.debug(_("add changeset %s\n") % short(x))
1671 1674 return cl.count()
1672 1675
1673 1676 def revmap(x):
1674 1677 return cl.rev(x)
1675 1678
1676 1679 if not source:
1677 1680 return 0
1678 1681
1679 1682 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1680 1683
1681 1684 changesets = files = revisions = 0
1682 1685
1683 1686 tr = self.transaction()
1684 1687
1685 1688 # write changelog data to temp files so concurrent readers will not see
1686 1689 # inconsistent view
1687 1690 cl = None
1688 1691 try:
1689 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1692 cl = appendfile.appendchangelog(self.sopener,
1693 self.changelog.version)
1690 1694
1691 1695 oldheads = len(cl.heads())
1692 1696
1693 1697 # pull off the changeset group
1694 1698 self.ui.status(_("adding changesets\n"))
1695 1699 cor = cl.count() - 1
1696 1700 chunkiter = changegroup.chunkiter(source)
1697 1701 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1698 1702 raise util.Abort(_("received changelog group is empty"))
1699 1703 cnr = cl.count() - 1
1700 1704 changesets = cnr - cor
1701 1705
1702 1706 # pull off the manifest group
1703 1707 self.ui.status(_("adding manifests\n"))
1704 1708 chunkiter = changegroup.chunkiter(source)
1705 1709 # no need to check for empty manifest group here:
1706 1710 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1707 1711 # no new manifest will be created and the manifest group will
1708 1712 # be empty during the pull
1709 1713 self.manifest.addgroup(chunkiter, revmap, tr)
1710 1714
1711 1715 # process the files
1712 1716 self.ui.status(_("adding file changes\n"))
1713 1717 while 1:
1714 1718 f = changegroup.getchunk(source)
1715 1719 if not f:
1716 1720 break
1717 1721 self.ui.debug(_("adding %s revisions\n") % f)
1718 1722 fl = self.file(f)
1719 1723 o = fl.count()
1720 1724 chunkiter = changegroup.chunkiter(source)
1721 1725 if fl.addgroup(chunkiter, revmap, tr) is None:
1722 1726 raise util.Abort(_("received file revlog group is empty"))
1723 1727 revisions += fl.count() - o
1724 1728 files += 1
1725 1729
1726 1730 cl.writedata()
1727 1731 finally:
1728 1732 if cl:
1729 1733 cl.cleanup()
1730 1734
1731 1735 # make changelog see real files again
1732 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1736 self.changelog = changelog.changelog(self.sopener,
1737 self.changelog.version)
1733 1738 self.changelog.checkinlinesize(tr)
1734 1739
1735 1740 newheads = len(self.changelog.heads())
1736 1741 heads = ""
1737 1742 if oldheads and newheads != oldheads:
1738 1743 heads = _(" (%+d heads)") % (newheads - oldheads)
1739 1744
1740 1745 self.ui.status(_("added %d changesets"
1741 1746 " with %d changes to %d files%s\n")
1742 1747 % (changesets, revisions, files, heads))
1743 1748
1744 1749 if changesets > 0:
1745 1750 self.hook('pretxnchangegroup', throw=True,
1746 1751 node=hex(self.changelog.node(cor+1)), source=srctype,
1747 1752 url=url)
1748 1753
1749 1754 tr.close()
1750 1755
1751 1756 if changesets > 0:
1752 1757 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1753 1758 source=srctype, url=url)
1754 1759
1755 1760 for i in range(cor + 1, cnr + 1):
1756 1761 self.hook("incoming", node=hex(self.changelog.node(i)),
1757 1762 source=srctype, url=url)
1758 1763
1759 1764 return newheads - oldheads + 1
1760 1765
1761 1766
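Note the return convention: newheads - oldheads + 1 lets a caller tell apart an empty pull (0), a pull with no new heads (1), and a pull that created heads (greater than 1). A hypothetical caller, not the actual pull code:

    def report(modheads):
        # modheads is addchangegroup's return value.
        if modheads == 0:
            print("no changes found")
        elif modheads == 1:
            print("pulled changes, no new heads")
        else:
            print("pulled changes, %d new heads" % (modheads - 1))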
1762 1767 def stream_in(self, remote):
1763 1768 fp = remote.stream_out()
1764 1769 resp = int(fp.readline())
1765 1770 if resp != 0:
1766 1771 raise util.Abort(_('operation forbidden by server'))
1767 1772 self.ui.status(_('streaming all changes\n'))
1768 1773 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1769 1774 self.ui.status(_('%d files to transfer, %s of data\n') %
1770 1775 (total_files, util.bytecount(total_bytes)))
1771 1776 start = time.time()
1772 1777 for i in xrange(total_files):
1773 1778 name, size = fp.readline().split('\0', 1)
1774 1779 size = int(size)
1775 1780 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1776 ofp = self.opener(name, 'w')
1781 ofp = self.sopener(name, 'w')
1777 1782 for chunk in util.filechunkiter(fp, limit=size):
1778 1783 ofp.write(chunk)
1779 1784 ofp.close()
1780 1785 elapsed = time.time() - start
1781 1786 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1782 1787 (util.bytecount(total_bytes), elapsed,
1783 1788 util.bytecount(total_bytes / elapsed)))
1784 1789 self.reload()
1785 1790 return len(self.heads()) + 1
1786 1791
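The wire format consumed here is line-oriented headers followed by raw data: a status line, a "total_files total_bytes" line, then for each file a "name\0size" line followed by exactly size bytes. A minimal reader sketch under that reading of the protocol:

    def read_stream(fp, write_file):
        # Status line: a nonzero code means the server refused.
        if int(fp.readline()):
            raise IOError("operation forbidden by server")
        # Header line: "<total_files> <total_bytes>".
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for _ in range(total_files):
            # Per-file header "<name>\0<size>\n", then <size> raw bytes
            # (the real code reads those bytes in bounded chunks).
            name, size = fp.readline().split('\0', 1)
            write_file(name, fp.read(int(size)))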
1787 1792 def clone(self, remote, heads=[], stream=False):
1788 1793 '''clone remote repository.
1789 1794
1790 1795 keyword arguments:
1791 1796 heads: list of revs to clone (forces use of pull)
1792 1797 stream: use streaming clone if possible'''
1793 1798
1794 1799 # now, all clients that can request uncompressed clones can
1795 1800 # read repo formats supported by all servers that can serve
1796 1801 # them.
1797 1802
1798 1803 # if revlog format changes, client will have to check version
1799 1804 # and format flags on "stream" capability, and use
1800 1805 # uncompressed only if compatible.
1801 1806
1802 1807 if stream and not heads and remote.capable('stream'):
1803 1808 return self.stream_in(remote)
1804 1809 return self.pull(remote, heads)
1805 1810
1806 1811 # used to avoid circular references so destructors work
1807 1812 def aftertrans(base):
1808 1813 p = base
1809 1814 def a():
1810 1815 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1811 1816 util.rename(os.path.join(p, "journal.dirstate"),
1812 1817 os.path.join(p, "undo.dirstate"))
1813 1818 return a
1814 1819
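aftertrans returns a plain closure, rather than a bound method, so the transaction holds no reference back to the repository and destructors still run. A self-contained sketch of the same shape; the usage shown in the comment is an assumption about how a transaction consumes the callback, not the real transaction API:

    import os

    def aftertrans_sketch(base):
        # Same shape as aftertrans above, using plain os.rename.
        def a():
            os.rename(os.path.join(base, "journal"),
                      os.path.join(base, "undo"))
            os.rename(os.path.join(base, "journal.dirstate"),
                      os.path.join(base, "undo.dirstate"))
        return a

    # A transaction would stash the callable and run it on commit:
    # tr = transaction(..., after=aftertrans_sketch(repo_path))
    # tr.close()  ->  journal* renamed to undo*, enabling rollback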
1815 1820 def instance(ui, path, create):
1816 1821 return localrepository(ui, util.drop_scheme('file', path), create)
1817 1822
1818 1823 def islocal(path):
1819 1824 return True
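The thread running through this whole change is the new self.sopener: repository metadata (hgrc, dirstate) stays on self.opener, while store data (changelog, manifest, filelogs, streamed files) moves to self.sopener. Both still point at .hg here, but the split is what lets the store live elsewhere later. A rough sketch of the opener pattern, assuming util.opener behaves roughly like a path-rooted open() that creates directories on write (paths hypothetical):

    import os

    def opener(base):
        # Return an open()-like callable rooted at base, creating
        # parent directories on write.
        def o(path, mode="r"):
            f = os.path.join(base, path)
            if "w" in mode or "a" in mode:
                d = os.path.dirname(f)
                if d and not os.path.isdir(d):
                    os.makedirs(d)
            return open(f, mode)
        return o

    op = opener("/tmp/repo/.hg")    # metadata: hgrc, dirstate, ...
    sop = opener("/tmp/repo/.hg")   # store: 00changelog.i, data/...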
@@ -1,64 +1,65 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from demandload import *
11 11 from i18n import gettext as _
12 12 demandload(globals(), "changelog filelog httprangereader")
13 13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14 14
15 15 class rangereader(httprangereader.httprangereader):
16 16 def read(self, size=None):
17 17 try:
18 18 return httprangereader.httprangereader.read(self, size)
19 19 except urllib2.HTTPError, inst:
20 20 raise IOError(None, inst)
21 21 except urllib2.URLError, inst:
22 22 raise IOError(None, inst.reason[1])
23 23
24 24 def opener(base):
25 25 """return a function that opens files over http"""
26 26 p = base
27 27 def o(path, mode="r"):
28 28 f = os.path.join(p, urllib.quote(path))
29 29 return rangereader(f)
30 30 return o
31 31
32 32 class statichttprepository(localrepo.localrepository):
33 33 def __init__(self, ui, path):
34 34 self._url = path
35 35 self.path = (path + "/.hg")
36 36 self.ui = ui
37 37 self.revlogversion = 0
38 38 self.opener = opener(self.path)
39 self.sopener = opener(self.path)
39 40 self.manifest = manifest.manifest(self.opener)
40 41 self.changelog = changelog.changelog(self.opener)
41 42 self.tagscache = None
42 43 self.nodetagscache = None
43 44 self.encodepats = None
44 45 self.decodepats = None
45 46
46 47 def url(self):
47 48 return 'static-' + self._url
48 49
49 50 def dev(self):
50 51 return -1
51 52
52 53 def local(self):
53 54 return False
54 55
55 56 def instance(ui, path, create):
56 57 if create:
57 58 raise util.Abort(_('cannot create new static-http repository'))
58 59 if path.startswith('old-http:'):
59 60 ui.warn(_("old-http:// syntax is deprecated, "
60 61 "please use static-http:// instead\n"))
61 62 path = path[4:]
62 63 else:
63 64 path = path[7:]
64 65 return statichttprepository(ui, path)
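With the matching one-line change here, statichttprepository keeps working under the base class's new habit of reading store data through sopener; every such read becomes an HTTP fetch against a dumb web server. A simplified sketch, with whole-file urlopen standing in for the real byte-range reader and a hypothetical URL:

    import urllib, urllib2

    def httpopener(base):
        # An open()-like callable that fetches repository files over
        # plain HTTP; simplified -- the real rangereader asks the
        # server for byte ranges instead of whole files.
        def o(path, mode="r"):
            return urllib2.urlopen(base + "/" + urllib.quote(path))
        return o

    # A clone then resolves every store read over the wire, e.g.
    # httpopener("http://example.com/repo/.hg")("00changelog.i")
    # (hypothetical URL; on the command line this would be
    #  hg clone static-http://example.com/repo)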