Fixed --repository option when handling relative path...
tksoh@users.sf.net
r933:9c43d68a default
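The single functional change in this revision is at line 637 of localrepository.__init__: self.root = path becomes self.root = os.path.abspath(path). Below is a minimal sketch of why resolving the root up front matters when --repository is given a relative path; the rel_root/abs_root names are illustrative, not part of hg.py.

import os

# Illustrative sketch: localrepository keeps the working-directory root in
# self.root and builds every working-copy path from it with os.path.join
# (see wjoin). If a relative --repository path is stored as-is, any later
# os.chdir() changes what that path resolves to; os.path.abspath() resolves
# it once, up front, against the original current directory.
rel_root = "myrepo"                   # hypothetical value passed on the command line
abs_root = os.path.abspath(rel_root)  # e.g. "/home/user/src/myrepo"

print(os.path.join(rel_root, ".hg"))  # "myrepo/.hg" - meaning shifts after os.chdir()
print(os.path.join(abs_root, ".hg"))  # "/home/user/src/myrepo/.hg" - stable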
@@ -1,2225 +1,2225 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 14 demandload(globals(), "bisect errno select stat")
15 15
16 16 class filelog(revlog):
17 17 def __init__(self, opener, path):
18 18 revlog.__init__(self, opener,
19 19 os.path.join("data", self.encodedir(path + ".i")),
20 20 os.path.join("data", self.encodedir(path + ".d")))
21 21
22 22 # This avoids a collision between a file named foo and a dir named
23 23 # foo.i or foo.d
24 24 def encodedir(self, path):
25 25 return (path
26 26 .replace(".hg/", ".hg.hg/")
27 27 .replace(".i/", ".i.hg/")
28 28 .replace(".d/", ".d.hg/"))
29 29
30 30 def decodedir(self, path):
31 31 return (path
32 32 .replace(".d.hg/", ".d/")
33 33 .replace(".i.hg/", ".i/")
34 34 .replace(".hg.hg/", ".hg/"))
35 35
36 36 def read(self, node):
37 37 t = self.revision(node)
38 38 if not t.startswith('\1\n'):
39 39 return t
40 40 s = t.find('\1\n', 2)
41 41 return t[s+2:]
42 42
43 43 def readmeta(self, node):
44 44 t = self.revision(node)
45 45 if not t.startswith('\1\n'):
46 46 return t
47 47 s = t.find('\1\n', 2)
48 48 mt = t[2:s]
49 49 for l in mt.splitlines():
50 50 k, v = l.split(": ", 1)
51 51 m[k] = v
52 52 return m
53 53
54 54 def add(self, text, meta, transaction, link, p1=None, p2=None):
55 55 if meta or text.startswith('\1\n'):
56 56 mt = ""
57 57 if meta:
58 58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
59 59 text = "\1\n" + "".join(mt) + "\1\n" + text
60 60 return self.addrevision(text, transaction, link, p1, p2)
61 61
62 62 def annotate(self, node):
63 63
64 64 def decorate(text, rev):
65 65 return ([rev] * len(text.splitlines()), text)
66 66
67 67 def pair(parent, child):
68 68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
69 69 child[0][b1:b2] = parent[0][a1:a2]
70 70 return child
71 71
72 72 # find all ancestors
73 73 needed = {node:1}
74 74 visit = [node]
75 75 while visit:
76 76 n = visit.pop(0)
77 77 for p in self.parents(n):
78 78 if p not in needed:
79 79 needed[p] = 1
80 80 visit.append(p)
81 81 else:
82 82 # count how many times we'll use this
83 83 needed[p] += 1
84 84
85 85 # sort by revision which is a topological order
86 86 visit = [ (self.rev(n), n) for n in needed.keys() ]
87 87 visit.sort()
88 88 hist = {}
89 89
90 90 for r,n in visit:
91 91 curr = decorate(self.read(n), self.linkrev(n))
92 92 for p in self.parents(n):
93 93 if p != nullid:
94 94 curr = pair(hist[p], curr)
95 95 # trim the history of unneeded revs
96 96 needed[p] -= 1
97 97 if not needed[p]:
98 98 del hist[p]
99 99 hist[n] = curr
100 100
101 101 return zip(hist[n][0], hist[n][1].splitlines(1))
102 102
103 103 class manifest(revlog):
104 104 def __init__(self, opener):
105 105 self.mapcache = None
106 106 self.listcache = None
107 107 self.addlist = None
108 108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
109 109
110 110 def read(self, node):
111 111 if node == nullid: return {} # don't upset local cache
112 112 if self.mapcache and self.mapcache[0] == node:
113 113 return self.mapcache[1]
114 114 text = self.revision(node)
115 115 map = {}
116 116 flag = {}
117 117 self.listcache = (text, text.splitlines(1))
118 118 for l in self.listcache[1]:
119 119 (f, n) = l.split('\0')
120 120 map[f] = bin(n[:40])
121 121 flag[f] = (n[40:-1] == "x")
122 122 self.mapcache = (node, map, flag)
123 123 return map
124 124
125 125 def readflags(self, node):
126 126 if node == nullid: return {} # don't upset local cache
127 127 if not self.mapcache or self.mapcache[0] != node:
128 128 self.read(node)
129 129 return self.mapcache[2]
130 130
131 131 def diff(self, a, b):
132 132 # this is sneaky, as we're not actually using a and b
133 133 if self.listcache and self.addlist and self.listcache[0] == a:
134 134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
135 135 if mdiff.patch(a, d) != b:
136 136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
137 137 return mdiff.textdiff(a, b)
138 138 return d
139 139 else:
140 140 return mdiff.textdiff(a, b)
141 141
142 142 def add(self, map, flags, transaction, link, p1=None, p2=None,
143 143 changed=None):
144 144 # directly generate the mdiff delta from the data collected during
145 145 # the bisect loop below
146 146 def gendelta(delta):
147 147 i = 0
148 148 result = []
149 149 while i < len(delta):
150 150 start = delta[i][2]
151 151 end = delta[i][3]
152 152 l = delta[i][4]
153 153 if l == None:
154 154 l = ""
155 155 while i < len(delta) - 1 and start <= delta[i+1][2] \
156 156 and end >= delta[i+1][2]:
157 157 if delta[i+1][3] > end:
158 158 end = delta[i+1][3]
159 159 if delta[i+1][4]:
160 160 l += delta[i+1][4]
161 161 i += 1
162 162 result.append(struct.pack(">lll", start, end, len(l)) + l)
163 163 i += 1
164 164 return result
165 165
166 166 # apply the changes collected during the bisect loop to our addlist
167 167 def addlistdelta(addlist, delta):
168 168 # apply the deltas to the addlist. start from the bottom up
169 169 # so changes to the offsets don't mess things up.
170 170 i = len(delta)
171 171 while i > 0:
172 172 i -= 1
173 173 start = delta[i][0]
174 174 end = delta[i][1]
175 175 if delta[i][4]:
176 176 addlist[start:end] = [delta[i][4]]
177 177 else:
178 178 del addlist[start:end]
179 179 return addlist
180 180
181 181 # calculate the byte offset of the start of each line in the
182 182 # manifest
183 183 def calcoffsets(addlist):
184 184 offsets = [0] * (len(addlist) + 1)
185 185 offset = 0
186 186 i = 0
187 187 while i < len(addlist):
188 188 offsets[i] = offset
189 189 offset += len(addlist[i])
190 190 i += 1
191 191 offsets[i] = offset
192 192 return offsets
193 193
194 194 # if we're using the listcache, make sure it is valid and
195 195 # parented by the same node we're diffing against
196 196 if not changed or not self.listcache or not p1 or \
197 197 self.mapcache[0] != p1:
198 198 files = map.keys()
199 199 files.sort()
200 200
201 201 self.addlist = ["%s\000%s%s\n" %
202 202 (f, hex(map[f]), flags[f] and "x" or '')
203 203 for f in files]
204 204 cachedelta = None
205 205 else:
206 206 addlist = self.listcache[1]
207 207
208 208 # find the starting offset for each line in the add list
209 209 offsets = calcoffsets(addlist)
210 210
211 211 # combine the changed lists into one list for sorting
212 212 work = [[x, 0] for x in changed[0]]
213 213 work[len(work):] = [[x, 1] for x in changed[1]]
214 214 work.sort()
215 215
216 216 delta = []
217 217 bs = 0
218 218
219 219 for w in work:
220 220 f = w[0]
221 221 # bs will either be the index of the item or the insert point
222 222 bs = bisect.bisect(addlist, f, bs)
223 223 if bs < len(addlist):
224 224 fn = addlist[bs][:addlist[bs].index('\0')]
225 225 else:
226 226 fn = None
227 227 if w[1] == 0:
228 228 l = "%s\000%s%s\n" % (f, hex(map[f]),
229 229 flags[f] and "x" or '')
230 230 else:
231 231 l = None
232 232 start = bs
233 233 if fn != f:
234 234 # item not found, insert a new one
235 235 end = bs
236 236 if w[1] == 1:
237 237 sys.stderr.write("failed to remove %s from manifest\n"
238 238 % f)
239 239 sys.exit(1)
240 240 else:
241 241 # item is found, replace/delete the existing line
242 242 end = bs + 1
243 243 delta.append([start, end, offsets[start], offsets[end], l])
244 244
245 245 self.addlist = addlistdelta(addlist, delta)
246 246 if self.mapcache[0] == self.tip():
247 247 cachedelta = "".join(gendelta(delta))
248 248 else:
249 249 cachedelta = None
250 250
251 251 text = "".join(self.addlist)
252 252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
253 253 sys.stderr.write("manifest delta failure\n")
254 254 sys.exit(1)
255 255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
256 256 self.mapcache = (n, map, flags)
257 257 self.listcache = (text, self.addlist)
258 258 self.addlist = None
259 259
260 260 return n
261 261
262 262 class changelog(revlog):
263 263 def __init__(self, opener):
264 264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265 265
266 266 def extract(self, text):
267 267 if not text:
268 268 return (nullid, "", "0", [], "")
269 269 last = text.index("\n\n")
270 270 desc = text[last + 2:]
271 271 l = text[:last].splitlines()
272 272 manifest = bin(l[0])
273 273 user = l[1]
274 274 date = l[2]
275 275 files = l[3:]
276 276 return (manifest, user, date, files, desc)
277 277
278 278 def read(self, node):
279 279 return self.extract(self.revision(node))
280 280
281 281 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
282 282 user=None, date=None):
283 283 date = date or "%d %d" % (time.time(), time.timezone)
284 284 list.sort()
285 285 l = [hex(manifest), user, date] + list + ["", desc]
286 286 text = "\n".join(l)
287 287 return self.addrevision(text, transaction, self.count(), p1, p2)
288 288
289 289 class dirstate:
290 290 def __init__(self, opener, ui, root):
291 291 self.opener = opener
292 292 self.root = root
293 293 self.dirty = 0
294 294 self.ui = ui
295 295 self.map = None
296 296 self.pl = None
297 297 self.copies = {}
298 298 self.ignorefunc = None
299 299
300 300 def wjoin(self, f):
301 301 return os.path.join(self.root, f)
302 302
303 303 def getcwd(self):
304 304 cwd = os.getcwd()
305 305 if cwd == self.root: return ''
306 306 return cwd[len(self.root) + 1:]
307 307
308 308 def ignore(self, f):
309 309 if not self.ignorefunc:
310 310 bigpat = []
311 311 try:
312 312 l = file(self.wjoin(".hgignore"))
313 313 for pat in l:
314 314 p = pat.rstrip()
315 315 if p:
316 316 try:
317 317 re.compile(p)
318 318 except:
319 319 self.ui.warn("ignoring invalid ignore"
320 320 + " regular expression '%s'\n" % p)
321 321 else:
322 322 bigpat.append(p)
323 323 except IOError: pass
324 324
325 325 if bigpat:
326 326 s = "(?:%s)" % (")|(?:".join(bigpat))
327 327 r = re.compile(s)
328 328 self.ignorefunc = r.search
329 329 else:
330 330 self.ignorefunc = util.never
331 331
332 332 return self.ignorefunc(f)
333 333
334 334 def __del__(self):
335 335 if self.dirty:
336 336 self.write()
337 337
338 338 def __getitem__(self, key):
339 339 try:
340 340 return self.map[key]
341 341 except TypeError:
342 342 self.read()
343 343 return self[key]
344 344
345 345 def __contains__(self, key):
346 346 if not self.map: self.read()
347 347 return key in self.map
348 348
349 349 def parents(self):
350 350 if not self.pl:
351 351 self.read()
352 352 return self.pl
353 353
354 354 def markdirty(self):
355 355 if not self.dirty:
356 356 self.dirty = 1
357 357
358 358 def setparents(self, p1, p2 = nullid):
359 359 self.markdirty()
360 360 self.pl = p1, p2
361 361
362 362 def state(self, key):
363 363 try:
364 364 return self[key][0]
365 365 except KeyError:
366 366 return "?"
367 367
368 368 def read(self):
369 369 if self.map is not None: return self.map
370 370
371 371 self.map = {}
372 372 self.pl = [nullid, nullid]
373 373 try:
374 374 st = self.opener("dirstate").read()
375 375 if not st: return
376 376 except: return
377 377
378 378 self.pl = [st[:20], st[20: 40]]
379 379
380 380 pos = 40
381 381 while pos < len(st):
382 382 e = struct.unpack(">cllll", st[pos:pos+17])
383 383 l = e[4]
384 384 pos += 17
385 385 f = st[pos:pos + l]
386 386 if '\0' in f:
387 387 f, c = f.split('\0')
388 388 self.copies[f] = c
389 389 self.map[f] = e[:4]
390 390 pos += l
391 391
392 392 def copy(self, source, dest):
393 393 self.read()
394 394 self.markdirty()
395 395 self.copies[dest] = source
396 396
397 397 def copied(self, file):
398 398 return self.copies.get(file, None)
399 399
400 400 def update(self, files, state, **kw):
401 401 ''' current states:
402 402 n normal
403 403 m needs merging
404 404 r marked for removal
405 405 a marked for addition'''
406 406
407 407 if not files: return
408 408 self.read()
409 409 self.markdirty()
410 410 for f in files:
411 411 if state == "r":
412 412 self.map[f] = ('r', 0, 0, 0)
413 413 else:
414 414 s = os.stat(os.path.join(self.root, f))
415 415 st_size = kw.get('st_size', s.st_size)
416 416 st_mtime = kw.get('st_mtime', s.st_mtime)
417 417 self.map[f] = (state, s.st_mode, st_size, st_mtime)
418 418
419 419 def forget(self, files):
420 420 if not files: return
421 421 self.read()
422 422 self.markdirty()
423 423 for f in files:
424 424 try:
425 425 del self.map[f]
426 426 except KeyError:
427 427 self.ui.warn("not in dirstate: %s!\n" % f)
428 428 pass
429 429
430 430 def clear(self):
431 431 self.map = {}
432 432 self.markdirty()
433 433
434 434 def write(self):
435 435 st = self.opener("dirstate", "w")
436 436 st.write("".join(self.pl))
437 437 for f, e in self.map.items():
438 438 c = self.copied(f)
439 439 if c:
440 440 f = f + "\0" + c
441 441 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
442 442 st.write(e + f)
443 443 self.dirty = 0
444 444
445 445 def filterfiles(self, files):
446 446 ret = {}
447 447 unknown = []
448 448
449 449 for x in files:
450 450 if x is '.':
451 451 return self.map.copy()
452 452 if x not in self.map:
453 453 unknown.append(x)
454 454 else:
455 455 ret[x] = self.map[x]
456 456
457 457 if not unknown:
458 458 return ret
459 459
460 460 b = self.map.keys()
461 461 b.sort()
462 462 blen = len(b)
463 463
464 464 for x in unknown:
465 465 bs = bisect.bisect(b, x)
466 466 if bs != 0 and b[bs-1] == x:
467 467 ret[x] = self.map[x]
468 468 continue
469 469 while bs < blen:
470 470 s = b[bs]
471 471 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
472 472 ret[s] = self.map[s]
473 473 else:
474 474 break
475 475 bs += 1
476 476 return ret
477 477
478 478 def walk(self, files = None, match = util.always, dc=None):
479 479 self.read()
480 480
481 481 # walk all files by default
482 482 if not files:
483 483 files = [self.root]
484 484 if not dc:
485 485 dc = self.map.copy()
486 486 elif not dc:
487 487 dc = self.filterfiles(files)
488 488
489 489 known = {'.hg': 1}
490 490 def seen(fn):
491 491 if fn in known: return True
492 492 known[fn] = 1
493 493 def traverse():
494 494 for ff in util.unique(files):
495 495 f = os.path.join(self.root, ff)
496 496 try:
497 497 st = os.stat(f)
498 498 except OSError, inst:
499 499 if ff not in dc: self.ui.warn('%s: %s\n' % (
500 500 util.pathto(self.getcwd(), ff),
501 501 inst.strerror))
502 502 continue
503 503 if stat.S_ISDIR(st.st_mode):
504 504 for dir, subdirs, fl in os.walk(f):
505 505 d = dir[len(self.root) + 1:]
506 506 nd = util.normpath(d)
507 507 if nd == '.': nd = ''
508 508 if seen(nd):
509 509 subdirs[:] = []
510 510 continue
511 511 for sd in subdirs:
512 512 ds = os.path.join(nd, sd +'/')
513 513 if self.ignore(ds) or not match(ds):
514 514 subdirs.remove(sd)
515 515 subdirs.sort()
516 516 fl.sort()
517 517 for fn in fl:
518 518 fn = util.pconvert(os.path.join(d, fn))
519 519 yield 'f', fn
520 520 elif stat.S_ISREG(st.st_mode):
521 521 yield 'f', ff
522 522 else:
523 523 kind = 'unknown'
524 524 if stat.S_ISCHR(st.st_mode): kind = 'character device'
525 525 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
526 526 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
527 527 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
528 528 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
529 529 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
530 530 util.pathto(self.getcwd(), ff),
531 531 kind))
532 532
533 533 ks = dc.keys()
534 534 ks.sort()
535 535 for k in ks:
536 536 yield 'm', k
537 537
538 538 # yield only files that match: all in dirstate, others only if
539 539 # not in .hgignore
540 540
541 541 for src, fn in util.unique(traverse()):
542 542 fn = util.normpath(fn)
543 543 if seen(fn): continue
544 544 if fn not in dc and self.ignore(fn):
545 545 continue
546 546 if match(fn):
547 547 yield src, fn
548 548
549 549 def changes(self, files=None, match=util.always):
550 550 self.read()
551 551 if not files:
552 552 dc = self.map.copy()
553 553 else:
554 554 dc = self.filterfiles(files)
555 555 lookup, modified, added, unknown = [], [], [], []
556 556 removed, deleted = [], []
557 557
558 558 for src, fn in self.walk(files, match, dc=dc):
559 559 try:
560 560 s = os.stat(os.path.join(self.root, fn))
561 561 except OSError:
562 562 continue
563 563 if not stat.S_ISREG(s.st_mode):
564 564 continue
565 565 c = dc.get(fn)
566 566 if c:
567 567 del dc[fn]
568 568 if c[0] == 'm':
569 569 modified.append(fn)
570 570 elif c[0] == 'a':
571 571 added.append(fn)
572 572 elif c[0] == 'r':
573 573 unknown.append(fn)
574 574 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
575 575 modified.append(fn)
576 576 elif c[3] != s.st_mtime:
577 577 lookup.append(fn)
578 578 else:
579 579 unknown.append(fn)
580 580
581 581 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
582 582 if c[0] == 'r':
583 583 removed.append(fn)
584 584 else:
585 585 deleted.append(fn)
586 586 return (lookup, modified, added, removed + deleted, unknown)
587 587
588 588 # used to avoid circular references so destructors work
589 589 def opener(base):
590 590 p = base
591 591 def o(path, mode="r"):
592 592 if p.startswith("http://"):
593 593 f = os.path.join(p, urllib.quote(path))
594 594 return httprangereader.httprangereader(f)
595 595
596 596 f = os.path.join(p, path)
597 597
598 598 mode += "b" # for that other OS
599 599
600 600 if mode[0] != "r":
601 601 try:
602 602 s = os.stat(f)
603 603 except OSError:
604 604 d = os.path.dirname(f)
605 605 if not os.path.isdir(d):
606 606 os.makedirs(d)
607 607 else:
608 608 if s.st_nlink > 1:
609 609 file(f + ".tmp", "wb").write(file(f, "rb").read())
610 610 util.rename(f+".tmp", f)
611 611
612 612 return file(f, mode)
613 613
614 614 return o
615 615
616 616 class RepoError(Exception): pass
617 617
618 618 class localrepository:
619 619 def __init__(self, ui, path=None, create=0):
620 620 self.remote = 0
621 621 if path and path.startswith("http://"):
622 622 self.remote = 1
623 623 self.path = path
624 624 else:
625 625 if not path:
626 626 p = os.getcwd()
627 627 while not os.path.isdir(os.path.join(p, ".hg")):
628 628 oldp = p
629 629 p = os.path.dirname(p)
630 630 if p == oldp: raise RepoError("no repo found")
631 631 path = p
632 632 self.path = os.path.join(path, ".hg")
633 633
634 634 if not create and not os.path.isdir(self.path):
635 635 raise RepoError("repository %s not found" % self.path)
636 636
637 self.root = path
637 self.root = os.path.abspath(path)
638 638 self.ui = ui
639 639
640 640 if create:
641 641 os.mkdir(self.path)
642 642 os.mkdir(self.join("data"))
643 643
644 644 self.opener = opener(self.path)
645 645 self.wopener = opener(self.root)
646 646 self.manifest = manifest(self.opener)
647 647 self.changelog = changelog(self.opener)
648 648 self.tagscache = None
649 649 self.nodetagscache = None
650 650
651 651 if not self.remote:
652 652 self.dirstate = dirstate(self.opener, ui, self.root)
653 653 try:
654 654 self.ui.readconfig(self.opener("hgrc"))
655 655 except IOError: pass
656 656
657 657 def hook(self, name, **args):
658 658 s = self.ui.config("hooks", name)
659 659 if s:
660 660 self.ui.note("running hook %s: %s\n" % (name, s))
661 661 old = {}
662 662 for k, v in args.items():
663 663 k = k.upper()
664 664 old[k] = os.environ.get(k, None)
665 665 os.environ[k] = v
666 666
667 667 r = os.system(s)
668 668
669 669 for k, v in old.items():
670 670 if v != None:
671 671 os.environ[k] = v
672 672 else:
673 673 del os.environ[k]
674 674
675 675 if r:
676 676 self.ui.warn("abort: %s hook failed with status %d!\n" %
677 677 (name, r))
678 678 return False
679 679 return True
680 680
681 681 def tags(self):
682 682 '''return a mapping of tag to node'''
683 683 if not self.tagscache:
684 684 self.tagscache = {}
685 685 def addtag(self, k, n):
686 686 try:
687 687 bin_n = bin(n)
688 688 except TypeError:
689 689 bin_n = ''
690 690 self.tagscache[k.strip()] = bin_n
691 691
692 692 try:
693 693 # read each head of the tags file, ending with the tip
694 694 # and add each tag found to the map, with "newer" ones
695 695 # taking precedence
696 696 fl = self.file(".hgtags")
697 697 h = fl.heads()
698 698 h.reverse()
699 699 for r in h:
700 700 for l in fl.revision(r).splitlines():
701 701 if l:
702 702 n, k = l.split(" ", 1)
703 703 addtag(self, k, n)
704 704 except KeyError:
705 705 pass
706 706
707 707 try:
708 708 f = self.opener("localtags")
709 709 for l in f:
710 710 n, k = l.split(" ", 1)
711 711 addtag(self, k, n)
712 712 except IOError:
713 713 pass
714 714
715 715 self.tagscache['tip'] = self.changelog.tip()
716 716
717 717 return self.tagscache
718 718
719 719 def tagslist(self):
720 720 '''return a list of tags ordered by revision'''
721 721 l = []
722 722 for t, n in self.tags().items():
723 723 try:
724 724 r = self.changelog.rev(n)
725 725 except:
726 726 r = -2 # sort to the beginning of the list if unknown
727 727 l.append((r,t,n))
728 728 l.sort()
729 729 return [(t,n) for r,t,n in l]
730 730
731 731 def nodetags(self, node):
732 732 '''return the tags associated with a node'''
733 733 if not self.nodetagscache:
734 734 self.nodetagscache = {}
735 735 for t,n in self.tags().items():
736 736 self.nodetagscache.setdefault(n,[]).append(t)
737 737 return self.nodetagscache.get(node, [])
738 738
739 739 def lookup(self, key):
740 740 try:
741 741 return self.tags()[key]
742 742 except KeyError:
743 743 try:
744 744 return self.changelog.lookup(key)
745 745 except:
746 746 raise RepoError("unknown revision '%s'" % key)
747 747
748 748 def dev(self):
749 749 if self.remote: return -1
750 750 return os.stat(self.path).st_dev
751 751
752 752 def local(self):
753 753 return not self.remote
754 754
755 755 def join(self, f):
756 756 return os.path.join(self.path, f)
757 757
758 758 def wjoin(self, f):
759 759 return os.path.join(self.root, f)
760 760
761 761 def file(self, f):
762 762 if f[0] == '/': f = f[1:]
763 763 return filelog(self.opener, f)
764 764
765 765 def getcwd(self):
766 766 return self.dirstate.getcwd()
767 767
768 768 def wfile(self, f, mode='r'):
769 769 return self.wopener(f, mode)
770 770
771 771 def transaction(self):
772 772 # save dirstate for undo
773 773 try:
774 774 ds = self.opener("dirstate").read()
775 775 except IOError:
776 776 ds = ""
777 777 self.opener("journal.dirstate", "w").write(ds)
778 778
779 779 def after():
780 780 util.rename(self.join("journal"), self.join("undo"))
781 781 util.rename(self.join("journal.dirstate"),
782 782 self.join("undo.dirstate"))
783 783
784 784 return transaction.transaction(self.ui.warn, self.opener,
785 785 self.join("journal"), after)
786 786
787 787 def recover(self):
788 788 lock = self.lock()
789 789 if os.path.exists(self.join("journal")):
790 790 self.ui.status("rolling back interrupted transaction\n")
791 791 return transaction.rollback(self.opener, self.join("journal"))
792 792 else:
793 793 self.ui.warn("no interrupted transaction available\n")
794 794
795 795 def undo(self):
796 796 lock = self.lock()
797 797 if os.path.exists(self.join("undo")):
798 798 self.ui.status("rolling back last transaction\n")
799 799 transaction.rollback(self.opener, self.join("undo"))
800 800 self.dirstate = None
801 801 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
802 802 self.dirstate = dirstate(self.opener, self.ui, self.root)
803 803 else:
804 804 self.ui.warn("no undo information available\n")
805 805
806 806 def lock(self, wait = 1):
807 807 try:
808 808 return lock.lock(self.join("lock"), 0)
809 809 except lock.LockHeld, inst:
810 810 if wait:
811 811 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
812 812 return lock.lock(self.join("lock"), wait)
813 813 raise inst
814 814
815 815 def rawcommit(self, files, text, user, date, p1=None, p2=None):
816 816 orig_parent = self.dirstate.parents()[0] or nullid
817 817 p1 = p1 or self.dirstate.parents()[0] or nullid
818 818 p2 = p2 or self.dirstate.parents()[1] or nullid
819 819 c1 = self.changelog.read(p1)
820 820 c2 = self.changelog.read(p2)
821 821 m1 = self.manifest.read(c1[0])
822 822 mf1 = self.manifest.readflags(c1[0])
823 823 m2 = self.manifest.read(c2[0])
824 824
825 825 if orig_parent == p1:
826 826 update_dirstate = 1
827 827 else:
828 828 update_dirstate = 0
829 829
830 830 tr = self.transaction()
831 831 mm = m1.copy()
832 832 mfm = mf1.copy()
833 833 linkrev = self.changelog.count()
834 834 for f in files:
835 835 try:
836 836 t = self.wfile(f).read()
837 837 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
838 838 r = self.file(f)
839 839 mfm[f] = tm
840 840 mm[f] = r.add(t, {}, tr, linkrev,
841 841 m1.get(f, nullid), m2.get(f, nullid))
842 842 if update_dirstate:
843 843 self.dirstate.update([f], "n")
844 844 except IOError:
845 845 try:
846 846 del mm[f]
847 847 del mfm[f]
848 848 if update_dirstate:
849 849 self.dirstate.forget([f])
850 850 except:
851 851 # deleted from p2?
852 852 pass
853 853
854 854 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
855 855 user = user or self.ui.username()
856 856 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
857 857 tr.close()
858 858 if update_dirstate:
859 859 self.dirstate.setparents(n, nullid)
860 860
861 861 def commit(self, files = None, text = "", user = None, date = None,
862 862 match = util.always, force=False):
863 863 commit = []
864 864 remove = []
865 865 if files:
866 866 for f in files:
867 867 s = self.dirstate.state(f)
868 868 if s in 'nmai':
869 869 commit.append(f)
870 870 elif s == 'r':
871 871 remove.append(f)
872 872 else:
873 873 self.ui.warn("%s not tracked!\n" % f)
874 874 else:
875 875 (c, a, d, u) = self.changes(match = match)
876 876 commit = c + a
877 877 remove = d
878 878
879 879 if not commit and not remove and not force:
880 880 self.ui.status("nothing changed\n")
881 881 return None
882 882
883 883 if not self.hook("precommit"):
884 884 return None
885 885
886 886 p1, p2 = self.dirstate.parents()
887 887 c1 = self.changelog.read(p1)
888 888 c2 = self.changelog.read(p2)
889 889 m1 = self.manifest.read(c1[0])
890 890 mf1 = self.manifest.readflags(c1[0])
891 891 m2 = self.manifest.read(c2[0])
892 892 lock = self.lock()
893 893 tr = self.transaction()
894 894
895 895 # check in files
896 896 new = {}
897 897 linkrev = self.changelog.count()
898 898 commit.sort()
899 899 for f in commit:
900 900 self.ui.note(f + "\n")
901 901 try:
902 902 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
903 903 t = self.wfile(f).read()
904 904 except IOError:
905 905 self.ui.warn("trouble committing %s!\n" % f)
906 906 raise
907 907
908 908 meta = {}
909 909 cp = self.dirstate.copied(f)
910 910 if cp:
911 911 meta["copy"] = cp
912 912 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
913 913 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
914 914
915 915 r = self.file(f)
916 916 fp1 = m1.get(f, nullid)
917 917 fp2 = m2.get(f, nullid)
918 918 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
919 919
920 920 # update manifest
921 921 m1.update(new)
922 922 for f in remove:
923 923 if f in m1:
924 924 del m1[f]
925 925 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
926 926 (new, remove))
927 927
928 928 # add changeset
929 929 new = new.keys()
930 930 new.sort()
931 931
932 932 if not text:
933 933 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
934 934 edittext += "".join(["HG: changed %s\n" % f for f in new])
935 935 edittext += "".join(["HG: removed %s\n" % f for f in remove])
936 936 edittext = self.ui.edit(edittext)
937 937 if not edittext.rstrip():
938 938 return None
939 939 text = edittext
940 940
941 941 user = user or self.ui.username()
942 942 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
943 943 tr.close()
944 944
945 945 self.dirstate.setparents(n)
946 946 self.dirstate.update(new, "n")
947 947 self.dirstate.forget(remove)
948 948
949 949 if not self.hook("commit", node=hex(n)):
950 950 return None
951 951 return n
952 952
953 953 def walk(self, node = None, files = [], match = util.always):
954 954 if node:
955 955 for fn in self.manifest.read(self.changelog.read(node)[0]):
956 956 if match(fn): yield 'm', fn
957 957 else:
958 958 for src, fn in self.dirstate.walk(files, match):
959 959 yield src, fn
960 960
961 961 def changes(self, node1 = None, node2 = None, files = [],
962 962 match = util.always):
963 963 mf2, u = None, []
964 964
965 965 def fcmp(fn, mf):
966 966 t1 = self.wfile(fn).read()
967 967 t2 = self.file(fn).revision(mf[fn])
968 968 return cmp(t1, t2)
969 969
970 970 def mfmatches(node):
971 971 mf = dict(self.manifest.read(node))
972 972 for fn in mf.keys():
973 973 if not match(fn):
974 974 del mf[fn]
975 975 return mf
976 976
977 977 # are we comparing the working directory?
978 978 if not node2:
979 979 l, c, a, d, u = self.dirstate.changes(files, match)
980 980
981 981 # are we comparing working dir against its parent?
982 982 if not node1:
983 983 if l:
984 984 # do a full compare of any files that might have changed
985 985 change = self.changelog.read(self.dirstate.parents()[0])
986 986 mf2 = mfmatches(change[0])
987 987 for f in l:
988 988 if fcmp(f, mf2):
989 989 c.append(f)
990 990
991 991 for l in c, a, d, u:
992 992 l.sort()
993 993
994 994 return (c, a, d, u)
995 995
996 996 # are we comparing working dir against non-tip?
997 997 # generate a pseudo-manifest for the working dir
998 998 if not node2:
999 999 if not mf2:
1000 1000 change = self.changelog.read(self.dirstate.parents()[0])
1001 1001 mf2 = mfmatches(change[0])
1002 1002 for f in a + c + l:
1003 1003 mf2[f] = ""
1004 1004 for f in d:
1005 1005 if f in mf2: del mf2[f]
1006 1006 else:
1007 1007 change = self.changelog.read(node2)
1008 1008 mf2 = mfmatches(change[0])
1009 1009
1010 1010 # flush lists from dirstate before comparing manifests
1011 1011 c, a = [], []
1012 1012
1013 1013 change = self.changelog.read(node1)
1014 1014 mf1 = mfmatches(change[0])
1015 1015
1016 1016 for fn in mf2:
1017 1017 if mf1.has_key(fn):
1018 1018 if mf1[fn] != mf2[fn]:
1019 1019 if mf2[fn] != "" or fcmp(fn, mf1):
1020 1020 c.append(fn)
1021 1021 del mf1[fn]
1022 1022 else:
1023 1023 a.append(fn)
1024 1024
1025 1025 d = mf1.keys()
1026 1026
1027 1027 for l in c, a, d, u:
1028 1028 l.sort()
1029 1029
1030 1030 return (c, a, d, u)
1031 1031
1032 1032 def add(self, list):
1033 1033 for f in list:
1034 1034 p = self.wjoin(f)
1035 1035 if not os.path.exists(p):
1036 1036 self.ui.warn("%s does not exist!\n" % f)
1037 1037 elif not os.path.isfile(p):
1038 1038 self.ui.warn("%s not added: only files supported currently\n" % f)
1039 1039 elif self.dirstate.state(f) in 'an':
1040 1040 self.ui.warn("%s already tracked!\n" % f)
1041 1041 else:
1042 1042 self.dirstate.update([f], "a")
1043 1043
1044 1044 def forget(self, list):
1045 1045 for f in list:
1046 1046 if self.dirstate.state(f) not in 'ai':
1047 1047 self.ui.warn("%s not added!\n" % f)
1048 1048 else:
1049 1049 self.dirstate.forget([f])
1050 1050
1051 1051 def remove(self, list):
1052 1052 for f in list:
1053 1053 p = self.wjoin(f)
1054 1054 if os.path.exists(p):
1055 1055 self.ui.warn("%s still exists!\n" % f)
1056 1056 elif self.dirstate.state(f) == 'a':
1057 1057 self.ui.warn("%s never committed!\n" % f)
1058 1058 self.dirstate.forget([f])
1059 1059 elif f not in self.dirstate:
1060 1060 self.ui.warn("%s not tracked!\n" % f)
1061 1061 else:
1062 1062 self.dirstate.update([f], "r")
1063 1063
1064 1064 def copy(self, source, dest):
1065 1065 p = self.wjoin(dest)
1066 1066 if not os.path.exists(p):
1067 1067 self.ui.warn("%s does not exist!\n" % dest)
1068 1068 elif not os.path.isfile(p):
1069 1069 self.ui.warn("copy failed: %s is not a file\n" % dest)
1070 1070 else:
1071 1071 if self.dirstate.state(dest) == '?':
1072 1072 self.dirstate.update([dest], "a")
1073 1073 self.dirstate.copy(source, dest)
1074 1074
1075 1075 def heads(self):
1076 1076 return self.changelog.heads()
1077 1077
1078 1078 # branchlookup returns a dict giving a list of branches for
1079 1079 # each head. A branch is defined as the tag of a node or
1080 1080 # the branch of the node's parents. If a node has multiple
1081 1081 # branch tags, tags are eliminated if they are visible from other
1082 1082 # branch tags.
1083 1083 #
1084 1084 # So, for this graph: a->b->c->d->e
1085 1085 # \ /
1086 1086 # aa -----/
1087 1087 # a has tag 2.6.12
1088 1088 # d has tag 2.6.13
1089 1089 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1090 1090 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1091 1091 # from the list.
1092 1092 #
1093 1093 # It is possible that more than one head will have the same branch tag.
1094 1094 # callers need to check the result for multiple heads under the same
1095 1095 # branch tag if that is a problem for them (ie checkout of a specific
1096 1096 # branch).
1097 1097 #
1098 1098 # passing in a specific branch will limit the depth of the search
1099 1099 # through the parents. It won't limit the branches returned in the
1100 1100 # result though.
1101 1101 def branchlookup(self, heads=None, branch=None):
1102 1102 if not heads:
1103 1103 heads = self.heads()
1104 1104 headt = [ h for h in heads ]
1105 1105 chlog = self.changelog
1106 1106 branches = {}
1107 1107 merges = []
1108 1108 seenmerge = {}
1109 1109
1110 1110 # traverse the tree once for each head, recording in the branches
1111 1111 # dict which tags are visible from this head. The branches
1112 1112 # dict also records which tags are visible from each tag
1113 1113 # while we traverse.
1114 1114 while headt or merges:
1115 1115 if merges:
1116 1116 n, found = merges.pop()
1117 1117 visit = [n]
1118 1118 else:
1119 1119 h = headt.pop()
1120 1120 visit = [h]
1121 1121 found = [h]
1122 1122 seen = {}
1123 1123 while visit:
1124 1124 n = visit.pop()
1125 1125 if n in seen:
1126 1126 continue
1127 1127 pp = chlog.parents(n)
1128 1128 tags = self.nodetags(n)
1129 1129 if tags:
1130 1130 for x in tags:
1131 1131 if x == 'tip':
1132 1132 continue
1133 1133 for f in found:
1134 1134 branches.setdefault(f, {})[n] = 1
1135 1135 branches.setdefault(n, {})[n] = 1
1136 1136 break
1137 1137 if n not in found:
1138 1138 found.append(n)
1139 1139 if branch in tags:
1140 1140 continue
1141 1141 seen[n] = 1
1142 1142 if pp[1] != nullid and n not in seenmerge:
1143 1143 merges.append((pp[1], [x for x in found]))
1144 1144 seenmerge[n] = 1
1145 1145 if pp[0] != nullid:
1146 1146 visit.append(pp[0])
1147 1147 # traverse the branches dict, eliminating branch tags from each
1148 1148 # head that are visible from another branch tag for that head.
1149 1149 out = {}
1150 1150 viscache = {}
1151 1151 for h in heads:
1152 1152 def visible(node):
1153 1153 if node in viscache:
1154 1154 return viscache[node]
1155 1155 ret = {}
1156 1156 visit = [node]
1157 1157 while visit:
1158 1158 x = visit.pop()
1159 1159 if x in viscache:
1160 1160 ret.update(viscache[x])
1161 1161 elif x not in ret:
1162 1162 ret[x] = 1
1163 1163 if x in branches:
1164 1164 visit[len(visit):] = branches[x].keys()
1165 1165 viscache[node] = ret
1166 1166 return ret
1167 1167 if h not in branches:
1168 1168 continue
1169 1169 # O(n^2), but somewhat limited. This only searches the
1170 1170 # tags visible from a specific head, not all the tags in the
1171 1171 # whole repo.
1172 1172 for b in branches[h]:
1173 1173 vis = False
1174 1174 for bb in branches[h].keys():
1175 1175 if b != bb:
1176 1176 if b in visible(bb):
1177 1177 vis = True
1178 1178 break
1179 1179 if not vis:
1180 1180 l = out.setdefault(h, [])
1181 1181 l[len(l):] = self.nodetags(b)
1182 1182 return out
1183 1183
1184 1184 def branches(self, nodes):
1185 1185 if not nodes: nodes = [self.changelog.tip()]
1186 1186 b = []
1187 1187 for n in nodes:
1188 1188 t = n
1189 1189 while n:
1190 1190 p = self.changelog.parents(n)
1191 1191 if p[1] != nullid or p[0] == nullid:
1192 1192 b.append((t, n, p[0], p[1]))
1193 1193 break
1194 1194 n = p[0]
1195 1195 return b
1196 1196
1197 1197 def between(self, pairs):
1198 1198 r = []
1199 1199
1200 1200 for top, bottom in pairs:
1201 1201 n, l, i = top, [], 0
1202 1202 f = 1
1203 1203
1204 1204 while n != bottom:
1205 1205 p = self.changelog.parents(n)[0]
1206 1206 if i == f:
1207 1207 l.append(n)
1208 1208 f = f * 2
1209 1209 n = p
1210 1210 i += 1
1211 1211
1212 1212 r.append(l)
1213 1213
1214 1214 return r
1215 1215
1216 1216 def newer(self, nodes):
1217 1217 m = {}
1218 1218 nl = []
1219 1219 pm = {}
1220 1220 cl = self.changelog
1221 1221 t = l = cl.count()
1222 1222
1223 1223 # find the lowest numbered node
1224 1224 for n in nodes:
1225 1225 l = min(l, cl.rev(n))
1226 1226 m[n] = 1
1227 1227
1228 1228 for i in xrange(l, t):
1229 1229 n = cl.node(i)
1230 1230 if n in m: # explicitly listed
1231 1231 pm[n] = 1
1232 1232 nl.append(n)
1233 1233 continue
1234 1234 for p in cl.parents(n):
1235 1235 if p in pm: # parent listed
1236 1236 pm[n] = 1
1237 1237 nl.append(n)
1238 1238 break
1239 1239
1240 1240 return nl
1241 1241
1242 1242 def findincoming(self, remote, base=None, heads=None):
1243 1243 m = self.changelog.nodemap
1244 1244 search = []
1245 1245 fetch = []
1246 1246 seen = {}
1247 1247 seenbranch = {}
1248 1248 if base == None:
1249 1249 base = {}
1250 1250
1251 1251 # assume we're closer to the tip than the root
1252 1252 # and start by examining the heads
1253 1253 self.ui.status("searching for changes\n")
1254 1254
1255 1255 if not heads:
1256 1256 heads = remote.heads()
1257 1257
1258 1258 unknown = []
1259 1259 for h in heads:
1260 1260 if h not in m:
1261 1261 unknown.append(h)
1262 1262 else:
1263 1263 base[h] = 1
1264 1264
1265 1265 if not unknown:
1266 1266 return None
1267 1267
1268 1268 rep = {}
1269 1269 reqcnt = 0
1270 1270
1271 1271 # search through remote branches
1272 1272 # a 'branch' here is a linear segment of history, with four parts:
1273 1273 # head, root, first parent, second parent
1274 1274 # (a branch always has two parents (or none) by definition)
1275 1275 unknown = remote.branches(unknown)
1276 1276 while unknown:
1277 1277 r = []
1278 1278 while unknown:
1279 1279 n = unknown.pop(0)
1280 1280 if n[0] in seen:
1281 1281 continue
1282 1282
1283 1283 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1284 1284 if n[0] == nullid:
1285 1285 break
1286 1286 if n in seenbranch:
1287 1287 self.ui.debug("branch already found\n")
1288 1288 continue
1289 1289 if n[1] and n[1] in m: # do we know the base?
1290 1290 self.ui.debug("found incomplete branch %s:%s\n"
1291 1291 % (short(n[0]), short(n[1])))
1292 1292 search.append(n) # schedule branch range for scanning
1293 1293 seenbranch[n] = 1
1294 1294 else:
1295 1295 if n[1] not in seen and n[1] not in fetch:
1296 1296 if n[2] in m and n[3] in m:
1297 1297 self.ui.debug("found new changeset %s\n" %
1298 1298 short(n[1]))
1299 1299 fetch.append(n[1]) # earliest unknown
1300 1300 base[n[2]] = 1 # latest known
1301 1301 continue
1302 1302
1303 1303 for a in n[2:4]:
1304 1304 if a not in rep:
1305 1305 r.append(a)
1306 1306 rep[a] = 1
1307 1307
1308 1308 seen[n[0]] = 1
1309 1309
1310 1310 if r:
1311 1311 reqcnt += 1
1312 1312 self.ui.debug("request %d: %s\n" %
1313 1313 (reqcnt, " ".join(map(short, r))))
1314 1314 for p in range(0, len(r), 10):
1315 1315 for b in remote.branches(r[p:p+10]):
1316 1316 self.ui.debug("received %s:%s\n" %
1317 1317 (short(b[0]), short(b[1])))
1318 1318 if b[0] not in m and b[0] not in seen:
1319 1319 unknown.append(b)
1320 1320
1321 1321 # do binary search on the branches we found
1322 1322 while search:
1323 1323 n = search.pop(0)
1324 1324 reqcnt += 1
1325 1325 l = remote.between([(n[0], n[1])])[0]
1326 1326 l.append(n[1])
1327 1327 p = n[0]
1328 1328 f = 1
1329 1329 for i in l:
1330 1330 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1331 1331 if i in m:
1332 1332 if f <= 2:
1333 1333 self.ui.debug("found new branch changeset %s\n" %
1334 1334 short(p))
1335 1335 fetch.append(p)
1336 1336 base[i] = 1
1337 1337 else:
1338 1338 self.ui.debug("narrowed branch search to %s:%s\n"
1339 1339 % (short(p), short(i)))
1340 1340 search.append((p, i))
1341 1341 break
1342 1342 p, f = i, f * 2
1343 1343
1344 1344 # sanity check our fetch list
1345 1345 for f in fetch:
1346 1346 if f in m:
1347 1347 raise RepoError("already have changeset " + short(f[:4]))
1348 1348
1349 1349 if base.keys() == [nullid]:
1350 1350 self.ui.warn("warning: pulling from an unrelated repository!\n")
1351 1351
1352 1352 self.ui.note("adding new changesets starting at " +
1353 1353 " ".join([short(f) for f in fetch]) + "\n")
1354 1354
1355 1355 self.ui.debug("%d total queries\n" % reqcnt)
1356 1356
1357 1357 return fetch
1358 1358
1359 1359 def findoutgoing(self, remote, base=None, heads=None):
1360 1360 if base == None:
1361 1361 base = {}
1362 1362 self.findincoming(remote, base, heads)
1363 1363
1364 1364 remain = dict.fromkeys(self.changelog.nodemap)
1365 1365
1366 1366 # prune everything remote has from the tree
1367 1367 del remain[nullid]
1368 1368 remove = base.keys()
1369 1369 while remove:
1370 1370 n = remove.pop(0)
1371 1371 if n in remain:
1372 1372 del remain[n]
1373 1373 for p in self.changelog.parents(n):
1374 1374 remove.append(p)
1375 1375
1376 1376 # find every node whose parents have been pruned
1377 1377 subset = []
1378 1378 for n in remain:
1379 1379 p1, p2 = self.changelog.parents(n)
1380 1380 if p1 not in remain and p2 not in remain:
1381 1381 subset.append(n)
1382 1382
1383 1383 # this is the set of all roots we have to push
1384 1384 return subset
1385 1385
1386 1386 def pull(self, remote):
1387 1387 lock = self.lock()
1388 1388
1389 1389 # if we have an empty repo, fetch everything
1390 1390 if self.changelog.tip() == nullid:
1391 1391 self.ui.status("requesting all changes\n")
1392 1392 fetch = [nullid]
1393 1393 else:
1394 1394 fetch = self.findincoming(remote)
1395 1395
1396 1396 if not fetch:
1397 1397 self.ui.status("no changes found\n")
1398 1398 return 1
1399 1399
1400 1400 cg = remote.changegroup(fetch)
1401 1401 return self.addchangegroup(cg)
1402 1402
1403 1403 def push(self, remote, force=False):
1404 1404 lock = remote.lock()
1405 1405
1406 1406 base = {}
1407 1407 heads = remote.heads()
1408 1408 inc = self.findincoming(remote, base, heads)
1409 1409 if not force and inc:
1410 1410 self.ui.warn("abort: unsynced remote changes!\n")
1411 1411 self.ui.status("(did you forget to sync? use push -f to force)\n")
1412 1412 return 1
1413 1413
1414 1414 update = self.findoutgoing(remote, base)
1415 1415 if not update:
1416 1416 self.ui.status("no changes found\n")
1417 1417 return 1
1418 1418 elif not force:
1419 1419 if len(heads) < len(self.changelog.heads()):
1420 1420 self.ui.warn("abort: push creates new remote branches!\n")
1421 1421 self.ui.status("(did you forget to merge?" +
1422 1422 " use push -f to force)\n")
1423 1423 return 1
1424 1424
1425 1425 cg = self.changegroup(update)
1426 1426 return remote.addchangegroup(cg)
1427 1427
1428 1428 def changegroup(self, basenodes):
1429 1429 class genread:
1430 1430 def __init__(self, generator):
1431 1431 self.g = generator
1432 1432 self.buf = ""
1433 1433 def fillbuf(self):
1434 1434 self.buf += "".join(self.g)
1435 1435
1436 1436 def read(self, l):
1437 1437 while l > len(self.buf):
1438 1438 try:
1439 1439 self.buf += self.g.next()
1440 1440 except StopIteration:
1441 1441 break
1442 1442 d, self.buf = self.buf[:l], self.buf[l:]
1443 1443 return d
1444 1444
1445 1445 def gengroup():
1446 1446 nodes = self.newer(basenodes)
1447 1447
1448 1448 # construct the link map
1449 1449 linkmap = {}
1450 1450 for n in nodes:
1451 1451 linkmap[self.changelog.rev(n)] = n
1452 1452
1453 1453 # construct a list of all changed files
1454 1454 changed = {}
1455 1455 for n in nodes:
1456 1456 c = self.changelog.read(n)
1457 1457 for f in c[3]:
1458 1458 changed[f] = 1
1459 1459 changed = changed.keys()
1460 1460 changed.sort()
1461 1461
1462 1462 # the changegroup is changesets + manifests + all file revs
1463 1463 revs = [ self.changelog.rev(n) for n in nodes ]
1464 1464
1465 1465 for y in self.changelog.group(linkmap): yield y
1466 1466 for y in self.manifest.group(linkmap): yield y
1467 1467 for f in changed:
1468 1468 yield struct.pack(">l", len(f) + 4) + f
1469 1469 g = self.file(f).group(linkmap)
1470 1470 for y in g:
1471 1471 yield y
1472 1472
1473 1473 yield struct.pack(">l", 0)
1474 1474
1475 1475 return genread(gengroup())
1476 1476
1477 1477 def addchangegroup(self, source):
1478 1478
1479 1479 def getchunk():
1480 1480 d = source.read(4)
1481 1481 if not d: return ""
1482 1482 l = struct.unpack(">l", d)[0]
1483 1483 if l <= 4: return ""
1484 1484 return source.read(l - 4)
1485 1485
1486 1486 def getgroup():
1487 1487 while 1:
1488 1488 c = getchunk()
1489 1489 if not c: break
1490 1490 yield c
1491 1491
1492 1492 def csmap(x):
1493 1493 self.ui.debug("add changeset %s\n" % short(x))
1494 1494 return self.changelog.count()
1495 1495
1496 1496 def revmap(x):
1497 1497 return self.changelog.rev(x)
1498 1498
1499 1499 if not source: return
1500 1500 changesets = files = revisions = 0
1501 1501
1502 1502 tr = self.transaction()
1503 1503
1504 1504 # pull off the changeset group
1505 1505 self.ui.status("adding changesets\n")
1506 1506 co = self.changelog.tip()
1507 1507 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1508 1508 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1509 1509
1510 1510 # pull off the manifest group
1511 1511 self.ui.status("adding manifests\n")
1512 1512 mm = self.manifest.tip()
1513 1513 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1514 1514
1515 1515 # process the files
1516 1516 self.ui.status("adding file changes\n")
1517 1517 while 1:
1518 1518 f = getchunk()
1519 1519 if not f: break
1520 1520 self.ui.debug("adding %s revisions\n" % f)
1521 1521 fl = self.file(f)
1522 1522 o = fl.count()
1523 1523 n = fl.addgroup(getgroup(), revmap, tr)
1524 1524 revisions += fl.count() - o
1525 1525 files += 1
1526 1526
1527 1527 self.ui.status(("added %d changesets" +
1528 1528 " with %d changes to %d files\n")
1529 1529 % (changesets, revisions, files))
1530 1530
1531 1531 tr.close()
1532 1532
1533 1533 if not self.hook("changegroup"):
1534 1534 return 1
1535 1535
1536 1536 return
1537 1537
1538 1538 def update(self, node, allow=False, force=False, choose=None,
1539 1539 moddirstate=True):
1540 1540 pl = self.dirstate.parents()
1541 1541 if not force and pl[1] != nullid:
1542 1542 self.ui.warn("aborting: outstanding uncommitted merges\n")
1543 1543 return 1
1544 1544
1545 1545 p1, p2 = pl[0], node
1546 1546 pa = self.changelog.ancestor(p1, p2)
1547 1547 m1n = self.changelog.read(p1)[0]
1548 1548 m2n = self.changelog.read(p2)[0]
1549 1549 man = self.manifest.ancestor(m1n, m2n)
1550 1550 m1 = self.manifest.read(m1n)
1551 1551 mf1 = self.manifest.readflags(m1n)
1552 1552 m2 = self.manifest.read(m2n)
1553 1553 mf2 = self.manifest.readflags(m2n)
1554 1554 ma = self.manifest.read(man)
1555 1555 mfa = self.manifest.readflags(man)
1556 1556
1557 1557 (c, a, d, u) = self.changes()
1558 1558
1559 1559 # is this a jump, or a merge? i.e. is there a linear path
1560 1560 # from p1 to p2?
1561 1561 linear_path = (pa == p1 or pa == p2)
1562 1562
1563 1563 # resolve the manifest to determine which files
1564 1564 # we care about merging
1565 1565 self.ui.note("resolving manifests\n")
1566 1566 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1567 1567 (force, allow, moddirstate, linear_path))
1568 1568 self.ui.debug(" ancestor %s local %s remote %s\n" %
1569 1569 (short(man), short(m1n), short(m2n)))
1570 1570
1571 1571 merge = {}
1572 1572 get = {}
1573 1573 remove = []
1574 1574 mark = {}
1575 1575
1576 1576 # construct a working dir manifest
1577 1577 mw = m1.copy()
1578 1578 mfw = mf1.copy()
1579 1579 umap = dict.fromkeys(u)
1580 1580
1581 1581 for f in a + c + u:
1582 1582 mw[f] = ""
1583 1583 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1584 1584
1585 1585 for f in d:
1586 1586 if f in mw: del mw[f]
1587 1587
1588 1588 # If we're jumping between revisions (as opposed to merging),
1589 1589 # and if neither the working directory nor the target rev has
1590 1590 # the file, then we need to remove it from the dirstate, to
1591 1591 # prevent the dirstate from listing the file when it is no
1592 1592 # longer in the manifest.
1593 1593 if moddirstate and linear_path and f not in m2:
1594 1594 self.dirstate.forget((f,))
1595 1595
1596 1596 # Compare manifests
1597 1597 for f, n in mw.iteritems():
1598 1598 if choose and not choose(f): continue
1599 1599 if f in m2:
1600 1600 s = 0
1601 1601
1602 1602 # is the wfile new since m1, and match m2?
1603 1603 if f not in m1:
1604 1604 t1 = self.wfile(f).read()
1605 1605 t2 = self.file(f).revision(m2[f])
1606 1606 if cmp(t1, t2) == 0:
1607 1607 mark[f] = 1
1608 1608 n = m2[f]
1609 1609 del t1, t2
1610 1610
1611 1611 # are files different?
1612 1612 if n != m2[f]:
1613 1613 a = ma.get(f, nullid)
1614 1614 # are both different from the ancestor?
1615 1615 if n != a and m2[f] != a:
1616 1616 self.ui.debug(" %s versions differ, resolve\n" % f)
1617 1617 # merge executable bits
1618 1618 # "if we changed or they changed, change in merge"
1619 1619 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1620 1620 mode = ((a^b) | (a^c)) ^ a
1621 1621 merge[f] = (m1.get(f, nullid), m2[f], mode)
1622 1622 s = 1
1623 1623 # are we clobbering?
1624 1624 # is remote's version newer?
1625 1625 # or are we going back in time?
1626 1626 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1627 1627 self.ui.debug(" remote %s is newer, get\n" % f)
1628 1628 get[f] = m2[f]
1629 1629 s = 1
1630 1630 else:
1631 1631 mark[f] = 1
1632 1632 elif f in umap:
1633 1633 # this unknown file is the same as the checkout
1634 1634 get[f] = m2[f]
1635 1635
1636 1636 if not s and mfw[f] != mf2[f]:
1637 1637 if force:
1638 1638 self.ui.debug(" updating permissions for %s\n" % f)
1639 1639 util.set_exec(self.wjoin(f), mf2[f])
1640 1640 else:
1641 1641 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1642 1642 mode = ((a^b) | (a^c)) ^ a
1643 1643 if mode != b:
1644 1644 self.ui.debug(" updating permissions for %s\n" % f)
1645 1645 util.set_exec(self.wjoin(f), mode)
1646 1646 mark[f] = 1
1647 1647 del m2[f]
1648 1648 elif f in ma:
1649 1649 if n != ma[f]:
1650 1650 r = "d"
1651 1651 if not force and (linear_path or allow):
1652 1652 r = self.ui.prompt(
1653 1653 (" local changed %s which remote deleted\n" % f) +
1654 1654 "(k)eep or (d)elete?", "[kd]", "k")
1655 1655 if r == "d":
1656 1656 remove.append(f)
1657 1657 else:
1658 1658 self.ui.debug("other deleted %s\n" % f)
1659 1659 remove.append(f) # other deleted it
1660 1660 else:
1661 1661 if n == m1.get(f, nullid): # same as parent
1662 1662 if p2 == pa: # going backwards?
1663 1663 self.ui.debug("remote deleted %s\n" % f)
1664 1664 remove.append(f)
1665 1665 else:
1666 1666 self.ui.debug("local created %s, keeping\n" % f)
1667 1667 else:
1668 1668 self.ui.debug("working dir created %s, keeping\n" % f)
1669 1669
1670 1670 for f, n in m2.iteritems():
1671 1671 if choose and not choose(f): continue
1672 1672 if f[0] == "/": continue
1673 1673 if f in ma and n != ma[f]:
1674 1674 r = "k"
1675 1675 if not force and (linear_path or allow):
1676 1676 r = self.ui.prompt(
1677 1677 ("remote changed %s which local deleted\n" % f) +
1678 1678 "(k)eep or (d)elete?", "[kd]", "k")
1679 1679 if r == "k": get[f] = n
1680 1680 elif f not in ma:
1681 1681 self.ui.debug("remote created %s\n" % f)
1682 1682 get[f] = n
1683 1683 else:
1684 1684 if force or p2 == pa: # going backwards?
1685 1685 self.ui.debug("local deleted %s, recreating\n" % f)
1686 1686 get[f] = n
1687 1687 else:
1688 1688 self.ui.debug("local deleted %s\n" % f)
1689 1689
1690 1690 del mw, m1, m2, ma
1691 1691
1692 1692 if force:
1693 1693 for f in merge:
1694 1694 get[f] = merge[f][1]
1695 1695 merge = {}
1696 1696
1697 1697 if linear_path or force:
1698 1698 # we don't need to do any magic, just jump to the new rev
1699 1699 mode = 'n'
1700 1700 p1, p2 = p2, nullid
1701 1701 else:
1702 1702 if not allow:
1703 1703 self.ui.status("this update spans a branch" +
1704 1704 " affecting the following files:\n")
1705 1705 fl = merge.keys() + get.keys()
1706 1706 fl.sort()
1707 1707 for f in fl:
1708 1708 cf = ""
1709 1709 if f in merge: cf = " (resolve)"
1710 1710 self.ui.status(" %s%s\n" % (f, cf))
1711 1711 self.ui.warn("aborting update spanning branches!\n")
1712 1712 self.ui.status("(use update -m to merge across branches" +
1713 1713 " or -C to lose changes)\n")
1714 1714 return 1
1715 1715 # we have to remember what files we needed to get/change
1716 1716 # because any file that's different from either one of its
1717 1717 # parents must be in the changeset
1718 1718 mode = 'm'
1719 1719 if moddirstate:
1720 1720 self.dirstate.update(mark.keys(), "m")
1721 1721
1722 1722 if moddirstate:
1723 1723 self.dirstate.setparents(p1, p2)
1724 1724
1725 1725 # get the files we don't need to change
1726 1726 files = get.keys()
1727 1727 files.sort()
1728 1728 for f in files:
1729 1729 if f[0] == "/": continue
1730 1730 self.ui.note("getting %s\n" % f)
1731 1731 t = self.file(f).read(get[f])
1732 1732 try:
1733 1733 self.wfile(f, "w").write(t)
1734 1734 except IOError:
1735 1735 os.makedirs(os.path.dirname(self.wjoin(f)))
1736 1736 self.wfile(f, "w").write(t)
1737 1737 util.set_exec(self.wjoin(f), mf2[f])
1738 1738 if moddirstate:
1739 1739 self.dirstate.update([f], mode)
1740 1740
1741 1741 # merge the tricky bits
1742 1742 files = merge.keys()
1743 1743 files.sort()
1744 1744 for f in files:
1745 1745 self.ui.status("merging %s\n" % f)
1746 1746 m, o, flag = merge[f]
1747 1747 self.merge3(f, m, o)
1748 1748 util.set_exec(self.wjoin(f), flag)
1749 1749 if moddirstate:
1750 1750 if mode == 'm':
1751 1751 # only update dirstate on branch merge, otherwise we
1752 1752 # could mark files with changes as unchanged
1753 1753 self.dirstate.update([f], mode)
1754 1754 elif p2 == nullid:
1755 1755 # update dirstate from parent1's manifest
1756 1756 m1n = self.changelog.read(p1)[0]
1757 1757 m1 = self.manifest.read(m1n)
1758 1758 f_len = len(self.file(f).read(m1[f]))
1759 1759 self.dirstate.update([f], mode, st_size=f_len, st_mtime=0)
1760 1760 else:
1761 1761 self.ui.warn("Second parent without branch merge!?\n"
1762 1762 "Dirstate for file %s may be wrong.\n" % f)
1763 1763
1764 1764 remove.sort()
1765 1765 for f in remove:
1766 1766 self.ui.note("removing %s\n" % f)
1767 1767 try:
1768 1768 os.unlink(f)
1769 1769 except OSError, inst:
1770 1770 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1771 1771 # try removing directories that might now be empty
1772 1772 try: os.removedirs(os.path.dirname(f))
1773 1773 except: pass
1774 1774 if moddirstate:
1775 1775 if mode == 'n':
1776 1776 self.dirstate.forget(remove)
1777 1777 else:
1778 1778 self.dirstate.update(remove, 'r')
1779 1779
1780 1780 def merge3(self, fn, my, other):
1781 1781 """perform a 3-way merge in the working directory"""
1782 1782
1783 1783 def temp(prefix, node):
1784 1784 pre = "%s~%s." % (os.path.basename(fn), prefix)
1785 1785 (fd, name) = tempfile.mkstemp("", pre)
1786 1786 f = os.fdopen(fd, "wb")
1787 1787 f.write(fl.revision(node))
1788 1788 f.close()
1789 1789 return name
1790 1790
1791 1791 fl = self.file(fn)
1792 1792 base = fl.ancestor(my, other)
1793 1793 a = self.wjoin(fn)
1794 1794 b = temp("base", base)
1795 1795 c = temp("other", other)
1796 1796
1797 1797 self.ui.note("resolving %s\n" % fn)
1798 1798 self.ui.debug("file %s: other %s ancestor %s\n" %
1799 1799 (fn, short(other), short(base)))
1800 1800
1801 1801 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1802 1802 or "hgmerge")
1803 1803 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1804 1804 if r:
1805 1805 self.ui.warn("merging %s failed!\n" % fn)
1806 1806
1807 1807 os.unlink(b)
1808 1808 os.unlink(c)
1809 1809
1810 1810 def verify(self):
1811 1811 filelinkrevs = {}
1812 1812 filenodes = {}
1813 1813 changesets = revisions = files = 0
1814 1814 errors = 0
1815 1815
1816 1816 seen = {}
1817 1817 self.ui.status("checking changesets\n")
1818 1818 for i in range(self.changelog.count()):
1819 1819 changesets += 1
1820 1820 n = self.changelog.node(i)
1821 1821 if n in seen:
1822 1822 self.ui.warn("duplicate changeset at revision %d\n" % i)
1823 1823 errors += 1
1824 1824 seen[n] = 1
1825 1825
1826 1826 for p in self.changelog.parents(n):
1827 1827 if p not in self.changelog.nodemap:
1828 1828 self.ui.warn("changeset %s has unknown parent %s\n" %
1829 1829 (short(n), short(p)))
1830 1830 errors += 1
1831 1831 try:
1832 1832 changes = self.changelog.read(n)
1833 1833 except Exception, inst:
1834 1834 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1835 1835 errors += 1
1836 1836
1837 1837 for f in changes[3]:
1838 1838 filelinkrevs.setdefault(f, []).append(i)
1839 1839
1840 1840 seen = {}
1841 1841 self.ui.status("checking manifests\n")
1842 1842 for i in range(self.manifest.count()):
1843 1843 n = self.manifest.node(i)
1844 1844 if n in seen:
1845 1845 self.ui.warn("duplicate manifest at revision %d\n" % i)
1846 1846 errors += 1
1847 1847 seen[n] = 1
1848 1848
1849 1849 for p in self.manifest.parents(n):
1850 1850 if p not in self.manifest.nodemap:
1851 1851 self.ui.warn("manifest %s has unknown parent %s\n" %
1852 1852 (short(n), short(p)))
1853 1853 errors += 1
1854 1854
1855 1855 try:
1856 1856 delta = mdiff.patchtext(self.manifest.delta(n))
1857 1857 except KeyboardInterrupt:
1858 1858 self.ui.warn("aborted\n")
1859 1859 sys.exit(0)
1860 1860 except Exception, inst:
1861 1861 self.ui.warn("unpacking manifest %s: %s\n"
1862 1862 % (short(n), inst))
1863 1863 errors += 1
1864 1864
1865 1865 ff = [ l.split('\0') for l in delta.splitlines() ]
1866 1866 for f, fn in ff:
1867 1867 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1868 1868
1869 1869 self.ui.status("crosschecking files in changesets and manifests\n")
1870 1870 for f in filenodes:
1871 1871 if f not in filelinkrevs:
1872 1872 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1873 1873 errors += 1
1874 1874
1875 1875 for f in filelinkrevs:
1876 1876 if f not in filenodes:
1877 1877 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1878 1878 errors += 1
1879 1879
1880 1880 self.ui.status("checking files\n")
1881 1881 ff = filenodes.keys()
1882 1882 ff.sort()
1883 1883 for f in ff:
1884 1884 if f == "/dev/null": continue
1885 1885 files += 1
1886 1886 fl = self.file(f)
1887 1887 nodes = { nullid: 1 }
1888 1888 seen = {}
1889 1889 for i in range(fl.count()):
1890 1890 revisions += 1
1891 1891 n = fl.node(i)
1892 1892
1893 1893 if n in seen:
1894 1894 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1895 1895 errors += 1
1896 1896
1897 1897 if n not in filenodes[f]:
1898 1898 self.ui.warn("%s: %d:%s not in manifests\n"
1899 1899 % (f, i, short(n)))
1900 1900 errors += 1
1901 1901 else:
1902 1902 del filenodes[f][n]
1903 1903
1904 1904 flr = fl.linkrev(n)
1905 1905 if flr not in filelinkrevs[f]:
1906 1906 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1907 1907 % (f, short(n), fl.linkrev(n)))
1908 1908 errors += 1
1909 1909 else:
1910 1910 filelinkrevs[f].remove(flr)
1911 1911
1912 1912 # verify contents
1913 1913 try:
1914 1914 t = fl.read(n)
1915 1915 except Exception, inst:
1916 1916 self.ui.warn("unpacking file %s %s: %s\n"
1917 1917 % (f, short(n), inst))
1918 1918 errors += 1
1919 1919
1920 1920 # verify parents
1921 1921 (p1, p2) = fl.parents(n)
1922 1922 if p1 not in nodes:
1923 1923 self.ui.warn("file %s:%s unknown parent 1 %s\n" %
1924 1924 (f, short(n), short(p1)))
1925 1925 errors += 1
1926 1926 if p2 not in nodes:
1927 1927 self.ui.warn("file %s:%s unknown parent 2 %s\n" %
1928 1928 (f, short(n), short(p2)))
1929 1929 errors += 1
1930 1930 nodes[n] = 1
1931 1931
1932 1932 # cross-check
1933 1933 for node in filenodes[f]:
1934 1934 self.ui.warn("node %s in manifests not in %s\n"
1935 1935 % (hex(node), f))
1936 1936 errors += 1
1937 1937
1938 1938 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1939 1939 (files, changesets, revisions))
1940 1940
1941 1941 if errors:
1942 1942 self.ui.warn("%d integrity errors encountered!\n" % errors)
1943 1943 return 1
1944 1944
1945 1945 class remoterepository:
1946 1946 def local(self):
1947 1947 return False
1948 1948
1949 1949 class httprepository(remoterepository):
1950 1950 def __init__(self, ui, path):
1951 1951 # fix missing / after hostname
1952 1952 s = urlparse.urlsplit(path)
1953 1953 partial = s[2]
1954 1954 if not partial: partial = "/"
1955 1955 self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
1956 1956 self.ui = ui
1957 1957 no_list = [ "localhost", "127.0.0.1" ]
1958 1958 host = ui.config("http_proxy", "host")
1959 1959 if host is None:
1960 1960 host = os.environ.get("http_proxy")
1961 1961 if host and host.startswith('http://'):
1962 1962 host = host[7:]
1963 1963 user = ui.config("http_proxy", "user")
1964 1964 passwd = ui.config("http_proxy", "passwd")
1965 1965 no = ui.config("http_proxy", "no")
1966 1966 if no is None:
1967 1967 no = os.environ.get("no_proxy")
1968 1968 if no:
1969 1969 no_list = no_list + no.split(",")
1970 1970
1971 1971 no_proxy = 0
1972 1972 for h in no_list:
1973 1973 if (path.startswith("http://" + h + "/") or
1974 1974 path.startswith("http://" + h + ":") or
1975 1975 path == "http://" + h):
1976 1976 no_proxy = 1
1977 1977
1978 1978 # Note: urllib2 takes proxy values from the environment and those will
1979 1979 # take precedence
1980 1980 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1981 1981 try:
1982 1982 if os.environ.has_key(env):
1983 1983 del os.environ[env]
1984 1984 except OSError:
1985 1985 pass
1986 1986
1987 1987 proxy_handler = urllib2.BaseHandler()
1988 1988 if host and not no_proxy:
1989 1989 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1990 1990
1991 1991 authinfo = None
1992 1992 if user and passwd:
1993 1993 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1994 1994 passmgr.add_password(None, host, user, passwd)
1995 1995 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1996 1996
1997 1997 opener = urllib2.build_opener(proxy_handler, authinfo)
1998 1998 urllib2.install_opener(opener)
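# The proxy handling above reads the [http_proxy] section of hgrc (host,
# user, passwd, no), falls back to the http_proxy/no_proxy environment
# variables, and installs a urllib2 opener used by all later requests.
# An illustrative configuration (values are examples only):
#
#   [http_proxy]
#   host = proxy.example.com:3128
#   no = localhost,127.0.0.1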
1999 1999
2000 2000 def dev(self):
2001 2001 return -1
2002 2002
2003 2003 def do_cmd(self, cmd, **args):
2004 2004 self.ui.debug("sending %s command\n" % cmd)
2005 2005 q = {"cmd": cmd}
2006 2006 q.update(args)
2007 2007 qs = urllib.urlencode(q)
2008 2008 cu = "%s?%s" % (self.url, qs)
2009 2009 resp = urllib2.urlopen(cu)
2010 2010 proto = resp.headers['content-type']
2011 2011
2012 2012 # accept old "text/plain" and "application/hg-changegroup" for now
2013 2013 if not proto.startswith('application/mercurial') and \
2014 2014 not proto.startswith('text/plain') and \
2015 2015 not proto.startswith('application/hg-changegroup'):
2016 2016 raise RepoError("'%s' does not appear to be an hg repository"
2017 2017 % self.url)
2018 2018
2019 2019 if proto.startswith('application/mercurial'):
2020 2020 version = proto[22:]
2021 2021 if float(version) > 0.1:
2022 2022 raise RepoError("'%s' uses newer protocol %s" %
2023 2023 (self.url, version))
2024 2024
2025 2025 return resp
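# Each command is sent as a plain HTTP GET with the command name and its
# arguments URL-encoded into the query string, e.g. (illustrative URL):
#
#   http://example.com/repo?cmd=branches&nodes=<hex node>
#
# The content-type check above rejects servers that are not hg repositories
# or that speak a newer application/mercurial protocol version.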
2026 2026
2027 2027 def heads(self):
2028 2028 d = self.do_cmd("heads").read()
2029 2029 try:
2030 2030 return map(bin, d[:-1].split(" "))
2031 2031 except:
2032 2032 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2033 2033 raise
2034 2034
2035 2035 def branches(self, nodes):
2036 2036 n = " ".join(map(hex, nodes))
2037 2037 d = self.do_cmd("branches", nodes=n).read()
2038 2038 try:
2039 2039 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2040 2040 return br
2041 2041 except:
2042 2042 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2043 2043 raise
2044 2044
2045 2045 def between(self, pairs):
2046 2046 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2047 2047 d = self.do_cmd("between", pairs=n).read()
2048 2048 try:
2049 2049 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2050 2050 return p
2051 2051 except:
2052 2052 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2053 2053 raise
2054 2054
2055 2055 def changegroup(self, nodes):
2056 2056 n = " ".join(map(hex, nodes))
2057 2057 f = self.do_cmd("changegroup", roots=n)
2058 2058 bytes = 0
2059 2059
2060 2060 class zread:
2061 2061 def __init__(self, f):
2062 2062 self.zd = zlib.decompressobj()
2063 2063 self.f = f
2064 2064 self.buf = ""
2065 2065 def read(self, l):
2066 2066 while l > len(self.buf):
2067 2067 r = self.f.read(4096)
2068 2068 if r:
2069 2069 self.buf += self.zd.decompress(r)
2070 2070 else:
2071 2071 self.buf += self.zd.flush()
2072 2072 break
2073 2073 d, self.buf = self.buf[:l], self.buf[l:]
2074 2074 return d
2075 2075
2076 2076 return zread(f)
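# The changegroup data arrives zlib-compressed; zread wraps the HTTP
# response and decompresses it lazily in 4k reads so callers can stream it.
# A minimal usage sketch (names are illustrative):
#
#   cg = remote.changegroup(roots)
#   while 1:
#       chunk = cg.read(4096)
#       if not chunk: break
#       # ... feed chunk to the local repository's addchangegroup()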
2077 2077
2078 2078 class remotelock:
2079 2079 def __init__(self, repo):
2080 2080 self.repo = repo
2081 2081 def release(self):
2082 2082 self.repo.unlock()
2083 2083 self.repo = None
2084 2084 def __del__(self):
2085 2085 if self.repo:
2086 2086 self.release()
2087 2087
2088 2088 class sshrepository(remoterepository):
2089 2089 def __init__(self, ui, path):
2090 2090 self.url = path
2091 2091 self.ui = ui
2092 2092
2093 2093 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))', path)
2094 2094 if not m:
2095 2095 raise RepoError("couldn't parse destination %s" % path)
2096 2096
2097 2097 self.user = m.group(2)
2098 2098 self.host = m.group(3)
2099 2099 self.port = m.group(5)
2100 2100 self.path = m.group(7)
2101 2101
2102 2102 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
2103 2103 args = self.port and ("%s -p %s") % (args, self.port) or args
2104 2104 path = self.path or ""
2105 2105
2106 2106 if not path:
2107 2107 raise RepoError("no remote repository path specified")
2108 2108
2109 2109 cmd = "ssh %s 'hg -R %s serve --stdio'"
2110 2110 cmd = cmd % (args, path)
2111 2111
2112 2112 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
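# The remote repository is driven through an ssh child process; with the
# pieces assembled above the spawned command looks like, for example
# (host, port and path are illustrative):
#
#   ssh -p 2222 user@example.com 'hg -R path/to/repo serve --stdio'
#
# pipeo, pipei and pipee are the child's stdin, stdout and stderr.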
2113 2113
2114 2114 def readerr(self):
2115 2115 while 1:
2116 2116 r,w,x = select.select([self.pipee], [], [], 0)
2117 2117 if not r: break
2118 2118 l = self.pipee.readline()
2119 2119 if not l: break
2120 2120 self.ui.status("remote: ", l)
2121 2121
2122 2122 def __del__(self):
2123 2123 try:
2124 2124 self.pipeo.close()
2125 2125 self.pipei.close()
2126 2126 for l in self.pipee:
2127 2127 self.ui.status("remote: ", l)
2128 2128 self.pipee.close()
2129 2129 except:
2130 2130 pass
2131 2131
2132 2132 def dev(self):
2133 2133 return -1
2134 2134
2135 2135 def do_cmd(self, cmd, **args):
2136 2136 self.ui.debug("sending %s command\n" % cmd)
2137 2137 self.pipeo.write("%s\n" % cmd)
2138 2138 for k, v in args.items():
2139 2139 self.pipeo.write("%s %d\n" % (k, len(v)))
2140 2140 self.pipeo.write(v)
2141 2141 self.pipeo.flush()
2142 2142
2143 2143 return self.pipei
2144 2144
2145 2145 def call(self, cmd, **args):
2146 2146 r = self.do_cmd(cmd, **args)
2147 2147 l = r.readline()
2148 2148 self.readerr()
2149 2149 try:
2150 2150 l = int(l)
2151 2151 except:
2152 2152 raise RepoError("unexpected response '%s'" % l)
2153 2153 return r.read(l)
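# Wire framing used by do_cmd/call over the ssh pipes: the command name on
# its own line, then one "<key> <length>" line per argument followed by that
# many bytes of value; the reply begins with a decimal length line followed
# by exactly that many bytes of payload.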
2154 2154
2155 2155 def lock(self):
2156 2156 self.call("lock")
2157 2157 return remotelock(self)
2158 2158
2159 2159 def unlock(self):
2160 2160 self.call("unlock")
2161 2161
2162 2162 def heads(self):
2163 2163 d = self.call("heads")
2164 2164 try:
2165 2165 return map(bin, d[:-1].split(" "))
2166 2166 except:
2167 2167 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2168 2168
2169 2169 def branches(self, nodes):
2170 2170 n = " ".join(map(hex, nodes))
2171 2171 d = self.call("branches", nodes=n)
2172 2172 try:
2173 2173 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2174 2174 return br
2175 2175 except:
2176 2176 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2177 2177
2178 2178 def between(self, pairs):
2179 2179 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2180 2180 d = self.call("between", pairs=n)
2181 2181 try:
2182 2182 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2183 2183 return p
2184 2184 except:
2185 2185 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2186 2186
2187 2187 def changegroup(self, nodes):
2188 2188 n = " ".join(map(hex, nodes))
2189 2189 f = self.do_cmd("changegroup", roots=n)
2190 2190 return self.pipei
2191 2191
2192 2192 def addchangegroup(self, cg):
2193 2193 d = self.call("addchangegroup")
2194 2194 if d:
2195 2195 raise RepoError("push refused: %s" % d)
2196 2196
2197 2197 while 1:
2198 2198 d = cg.read(4096)
2199 2199 if not d: break
2200 2200 self.pipeo.write(d)
2201 2201 self.readerr()
2202 2202
2203 2203 self.pipeo.flush()
2204 2204
2205 2205 self.readerr()
2206 2206 l = int(self.pipei.readline())
2207 2207 return self.pipei.read(l) != ""
2208 2208
2209 2209 class httpsrepository(httprepository):
2210 2210 pass
2211 2211
2212 2212 def repository(ui, path=None, create=0):
2213 2213 if path:
2214 2214 if path.startswith("http://"):
2215 2215 return httprepository(ui, path)
2216 2216 if path.startswith("https://"):
2217 2217 return httpsrepository(ui, path)
2218 2218 if path.startswith("hg://"):
2219 2219 return httprepository(ui, path.replace("hg://", "http://"))
2220 2220 if path.startswith("old-http://"):
2221 2221 return localrepository(ui, path.replace("old-http://", "http://"))
2222 2222 if path.startswith("ssh://"):
2223 2223 return sshrepository(ui, path)
2224 2224
2225 2225 return localrepository(ui, path, create)
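# A minimal usage sketch for the factory above (paths and URL are
# illustrative; myui stands for an existing ui instance):
#
#   local  = repository(myui, "/path/to/repo")           # localrepository
#   remote = repository(myui, "http://example.com/repo") # httprepository
#   new    = repository(myui, "/path/to/new", create=1)  # create a new repo
#   heads  = remote.heads()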