##// END OF EJS Templates
Warn about bogus ignore expressions...
Matt Mackall -
r656:147d2fa2 default
parent child Browse files
Show More
@@ -1,1868 +1,1875 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff")
14 14 demandload(globals(), "bisect select")
15 15
class filelog(revlog):
    """Per-file history storage.

    Each tracked file is kept in its own revlog under .hg/data.
    Revision texts beginning with the marker '\1\n' carry a
    "key: value" metadata header terminated by a second '\1\n'.
    """
    def __init__(self, opener, path):
        revlog.__init__(self, opener,
                        os.path.join("data", path + ".i"),
                        os.path.join("data", path + ".d"))

    def read(self, node):
        """Return the file text for node with any metadata header removed."""
        t = self.revision(node)
        if t[:2] != '\1\n':
            return t
        s = t.find('\1\n', 2)
        return t[s+2:]

    def readmeta(self, node):
        """Return the metadata dict for node; {} when there is no header."""
        t = self.revision(node)
        if t[:2] != '\1\n':
            # bug fix: previously returned the raw text here, which is
            # not a mapping; callers expect a dict
            return {}
        s = t.find('\1\n', 2)
        mt = t[2:s]
        m = {}  # bug fix: m was never initialized (NameError on any metadata)
        for l in mt.splitlines():
            k, v = l.split(": ", 1)
            m[k] = v
        return m

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision, prepending a metadata header when meta is given
        or when the text itself starts with the metadata marker."""
        if meta or text[:2] == '\1\n':
            mt = ""
            if meta:
                mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
            text = "\1\n" + "".join(mt) + "\1\n" + text
        return self.addrevision(text, transaction, link, p1, p2)

    def annotate(self, node):
        """Return [(linkrev, line)] attributing each line of node's text."""

        def decorate(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def pair(parent, child):
            # lines unchanged from the parent keep the parent's attribution
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        # find all ancestors
        needed = {node:1}
        visit = [node]
        while visit:
            n = visit.pop(0)
            for p in self.parents(n):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision which is a topological order
        visit = [ (self.rev(n), n) for n in needed.keys() ]
        visit.sort()
        hist = {}

        for r,n in visit:
            curr = decorate(self.read(n), self.linkrev(n))
            for p in self.parents(n):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[n] = curr

        return zip(hist[n][0], hist[n][1].splitlines(1))
class manifest(revlog):
    """The manifest revlog: each revision lists the tracked files, one
    "name\\0<40 hex chars>[x]\\n" line per file (trailing "x" marks the
    file executable).

    Two caches avoid reparsing and rediffing:
      mapcache  = (node, {file: node}, {file: is_exec}) of the last read
      listcache = (text, text.splitlines(1)) of the last text handled
    """
    def __init__(self, opener):
        self.mapcache = None
        self.listcache = None
        self.addlist = None  # line list built by add(), consumed by diff()
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        """Return {file: filenode} for the given manifest node (cached)."""
        if node == nullid: return {} # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        flag = {}
        self.listcache = (text, text.splitlines(1))
        for l in self.listcache[1]:
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
            # anything between the 40 hex digits and the newline is the
            # exec flag
            flag[f] = (n[40:-1] == "x")
        self.mapcache = (node, map, flag)
        return map

    def readflags(self, node):
        """Return {file: is_executable} for the given manifest node."""
        if node == nullid: return {} # don't upset local cache
        if not self.mapcache or self.mapcache[0] != node:
            self.read(node)
        return self.mapcache[2]

    def diff(self, a, b):
        """Return a delta turning text a into text b, reusing the line
        lists cached by add() when they apply; falls back to textdiff."""
        # this is sneaky, as we're not actually using a and b
        if self.listcache and self.addlist and self.listcache[0] == a:
            d = mdiff.diff(self.listcache[1], self.addlist, 1)
            if mdiff.patch(a, d) != b:
                # cross-check failed: distrust the cached diff
                sys.stderr.write("*** sortdiff failed, falling back ***\n")
                return mdiff.textdiff(a, b)
            return d
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, flags, transaction, link, p1=None, p2=None,changed=None):
        """Store a new manifest revision and return its node.

        When `changed` = (added/modified files, removed files) is given
        and the listcache is parented by p1, the new text and its delta
        are built incrementally via bisection instead of from scratch.
        """
        # directly generate the mdiff delta from the data collected during
        # the bisect loop below
        def gendelta(delta):
            i = 0
            result = []
            while i < len(delta):
                start = delta[i][2]
                end = delta[i][3]
                l = delta[i][4]
                if l == None:
                    l = ""
                # fold overlapping/adjacent hunks into a single hunk
                while i < len(delta) - 1 and start <= delta[i+1][2] and end >= delta[i+1][2]:
                    if delta[i+1][3] > end:
                        end = delta[i+1][3]
                    if delta[i+1][4]:
                        l += delta[i+1][4]
                    i += 1
                # binary hunk header: start offset, end offset, data length
                result.append(struct.pack(">lll", start, end, len(l)) + l)
                i += 1
            return result

        # apply the changes collected during the bisect loop to our addlist
        def addlistdelta(addlist, delta):
            # apply the deltas to the addlist. start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(delta)
            while i > 0:
                i -= 1
                start = delta[i][0]
                end = delta[i][1]
                if delta[i][4]:
                    addlist[start:end] = [delta[i][4]]
                else:
                    del addlist[start:end]
            return addlist

        # calculate the byte offset of the start of each line in the
        # manifest
        def calcoffsets(addlist):
            offsets = [0] * (len(addlist) + 1)
            offset = 0
            i = 0
            while i < len(addlist):
                offsets[i] = offset
                offset += len(addlist[i])
                i += 1
            offsets[i] = offset
            return offsets

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or self.mapcache[0] != p1:
            # slow path: rebuild the full sorted line list
            files = map.keys()
            files.sort()

            self.addlist = ["%s\000%s%s\n" %
                            (f, hex(map[f]), flags[f] and "x" or '')
                            for f in files]
            cachedelta = None
        else:
            addlist = self.listcache[1]

            # find the starting offset for each line in the add list
            offsets = calcoffsets(addlist)

            # combine the changed lists into one list for sorting
            # (tag 0 = added/modified, tag 1 = removed)
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            bs = 0

            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insertion point
                bs = bisect.bisect(addlist, f, bs)
                if bs < len(addlist):
                    fn = addlist[bs][:addlist[bs].index('\0')]
                else:
                    fn = None
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]), flags[f] and "x" or '')
                else:
                    l = None
                start = bs
                if fn != f:
                    # item not found, insert a new one
                    end = bs
                    if w[1] == 1:
                        sys.stderr.write("failed to remove %s from manifest" % f)
                        sys.exit(1)
                else:
                    # item is found, replace/delete the existing line
                    end = bs + 1
                delta.append([start, end, offsets[start], offsets[end], l])

            self.addlist = addlistdelta(addlist, delta)
            if self.mapcache[0] == self.tip():
                # the delta applies on top of the stored tip: reuse it
                cachedelta = "".join(gendelta(delta))
            else:
                cachedelta = None

        text = "".join(self.addlist)
        if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
            # consistency check: incremental delta must reproduce the text
            sys.stderr.write("manifest delta failure")
            sys.exit(1)
        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self.mapcache = (n, map, flags)
        self.listcache = (text, self.addlist)
        self.addlist = None

        return n
class changelog(revlog):
    """The changelog: one revision per changeset.

    A changeset text is "<manifest hex>\\n<user>\\n<date>\\n<files...>\\n\\n<desc>".
    """
    def __init__(self, opener):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")

    def extract(self, text):
        """Split a changeset text into (manifest, user, date, files, desc)."""
        if not text:
            return (nullid, "", "0", [], "")
        sep = text.index("\n\n")
        desc = text[sep + 2:]
        header = text[:sep].splitlines()
        return (bin(header[0]), header[1], header[2], header[3:], desc)

    def read(self, node):
        """Return the parsed changeset stored at node."""
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None,
            user=None, date=None):
        """Record a new changeset and return its node (sorts list in place)."""
        if not date:
            date = "%d %d" % (time.time(), time.timezone)
        list.sort()
        parts = [hex(manifest), user, date] + list + ["", desc]
        return self.addrevision("\n".join(parts), transaction,
                                self.count(), p1, p2)
class dirstate:
    """Tracks the state of the working directory.

    Backed by the .hg/dirstate file: two 20-byte parent nodes followed
    by one record per file, packed as ">cllll" (state char, mode, size,
    mtime, name length) plus the file name.  States: n = normal,
    m = needs merging, r = marked for removal, a = marked for addition.
    """
    def __init__(self, opener, ui, root):
        self.opener = opener
        self.root = root
        self.dirty = 0          # set when in-memory state needs writing
        self.ui = ui
        self.map = None         # {file: (state, mode, size, mtime)}, lazy
        self.pl = None          # the two parent nodes, lazy
        self.copies = {}        # {dest: source} copy records

    def __del__(self):
        # flush pending changes when the object goes away
        if self.dirty:
            self.write()

    def __getitem__(self, key):
        try:
            return self.map[key]
        except TypeError:
            # map is still None: load it, then retry
            self.read()
            return self[key]

    def __contains__(self, key):
        if not self.map: self.read()
        return key in self.map

    def parents(self):
        """Return the two parent nodes of the working directory."""
        if not self.pl:
            self.read()
        return self.pl

    def setparents(self, p1, p2 = nullid):
        """Set the working directory parents (written out lazily)."""
        self.dirty = 1
        self.pl = p1, p2

    def state(self, key):
        """Return the state character for key, or '?' if untracked."""
        try:
            return self[key][0]
        except KeyError:
            return "?"

    def read(self):
        """Parse .hg/dirstate into self.map/self.pl (no-op if loaded)."""
        if self.map is not None: return self.map

        self.map = {}
        self.pl = [nullid, nullid]
        try:
            st = self.opener("dirstate").read()
            if not st: return
        except: return

        self.pl = [st[:20], st[20: 40]]

        pos = 40
        while pos < len(st):
            # 17-byte fixed header, then the variable-length name
            e = struct.unpack(">cllll", st[pos:pos+17])
            l = e[4]
            pos += 17
            f = st[pos:pos + l]
            if '\0' in f:
                # name field also carries the copy source
                f, c = f.split('\0')
                self.copies[f] = c
            self.map[f] = e[:4]
            pos += l

    def copy(self, source, dest):
        """Record that dest was copied from source."""
        self.read()
        self.dirty = 1
        self.copies[dest] = source

    def copied(self, file):
        """Return the recorded copy source of file, or None."""
        return self.copies.get(file, None)

    def update(self, files, state):
        ''' current states:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition'''

        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            if state == "r":
                self.map[f] = ('r', 0, 0, 0)
            else:
                # record the current stat so changes() can spot edits
                s = os.stat(os.path.join(self.root, f))
                self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)

    def forget(self, files):
        """Drop the given files from the dirstate entirely."""
        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            try:
                del self.map[f]
            except KeyError:
                self.ui.warn("not in dirstate: %s!\n" % f)
                pass

    def clear(self):
        """Forget every tracked file."""
        self.map = {}
        self.dirty = 1

    def write(self):
        """Serialize the in-memory state back to .hg/dirstate."""
        st = self.opener("dirstate", "w")
        st.write("".join(self.pl))
        for f, e in self.map.items():
            c = self.copied(f)
            if c:
                # append the copy source to the name field
                f = f + "\0" + c
            e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
            st.write(e + f)
        self.dirty = 0

    def changes(self, files, ignore):
        """Walk the working directory and classify files against the
        dirstate.  Returns (lookup, changed, added, deleted, unknown);
        `lookup` holds files whose contents must be compared to decide.
        `ignore` is a predicate for filtering unknown files."""
        self.read()
        dc = self.map.copy()
        lookup, changed, added, unknown = [], [], [], []

        # compare all files by default
        if not files: files = [self.root]

        # recursive generator of all files listed
        def walk(files):
            for f in util.unique(files):
                f = os.path.join(self.root, f)
                if os.path.isdir(f):
                    for dir, subdirs, fl in os.walk(f):
                        d = dir[len(self.root) + 1:]
                        if ".hg" in subdirs: subdirs.remove(".hg")
                        for fn in fl:
                            fn = util.pconvert(os.path.join(d, fn))
                            yield fn
                else:
                    yield f[len(self.root) + 1:]

        for fn in util.unique(walk(files)):
            try: s = os.stat(os.path.join(self.root, fn))
            except: continue

            if fn in dc:
                c = dc[fn]
                del dc[fn]

                if c[0] == 'm':
                    changed.append(fn)
                elif c[0] == 'a':
                    added.append(fn)
                elif c[0] == 'r':
                    unknown.append(fn)
                elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
                    # size differs or the exec bit flipped: surely changed
                    changed.append(fn)
                elif c[1] != s.st_mode or c[3] != s.st_mtime:
                    # stat differs but contents may match: needs a look
                    lookup.append(fn)
            else:
                if not ignore(fn): unknown.append(fn)

        # anything left in dc was never seen on disk: deleted
        return (lookup, changed, added, dc.keys(), unknown)
429 429
# used to avoid circular references so destructors work
def opener(base):
    """Return a file-opening function rooted at `base`.

    Paths under an http:// base yield httprangereader objects; local
    paths open regular files.  Binding `base` into a closure (rather
    than onto a repository object) avoids reference cycles.
    """
    p = base
    def o(path, mode="r"):
        if p[:7] == "http://":
            f = os.path.join(p, urllib.quote(path))
            return httprangereader.httprangereader(f)

        f = os.path.join(p, path)

        mode += "b" # for that other OS

        if mode[0] != "r":
            # writing: make sure we can do so safely
            try:
                s = os.stat(f)
            except OSError:
                # target missing: ensure its directory exists
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    os.makedirs(d)
            else:
                if s.st_nlink > 1:
                    # break hardlinks before writing: copy the contents
                    # aside, then rename the copy into place
                    file(f + ".tmp", "wb").write(file(f, "rb").read())
                    util.rename(f+".tmp", f)

        return file(f, mode)

    return o
457 457
458 458 class RepoError(Exception): pass
459 459
460 460 class localrepository:
    def __init__(self, ui, path=None, create=0):
        """Open (or, with create=1, initialize) a repository.

        With no path, search upward from the cwd for a .hg directory.
        An http:// path opens a remote repository (no dirstate).
        Raises RepoError when no repository can be found.
        """
        self.remote = 0
        if path and path[:7] == "http://":
            self.remote = 1
            self.path = path
        else:
            if not path:
                # walk up from the cwd looking for .hg
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    oldp = p
                    p = os.path.dirname(p)
                    if p == oldp: raise RepoError("no repo found")
                path = p
            self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise RepoError("repository %s not found" % self.path)

        self.root = path
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.opener = opener(self.path)    # opens files under .hg
        self.wopener = opener(self.root)   # opens working-dir files
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        self.ignorefunc = None             # lazy, built by ignore()
        self.tagscache = None              # lazy, built by tags()
        self.nodetagscache = None          # lazy, built by nodetags()

        if not self.remote:
            self.dirstate = dirstate(self.opener, ui, self.root)
            try:
                self.ui.readconfig(self.opener("hgrc"))
            except IOError: pass
499 499
500 500 def ignore(self, f):
501 501 if not self.ignorefunc:
502 502 bigpat = []
503 503 try:
504 504 l = file(self.wjoin(".hgignore"))
505 505 for pat in l:
506 506 if pat != "\n":
507 bigpat.append(util.pconvert(pat[:-1]))
507 p = util.pconvert(pat[:-1])
508 try:
509 r = re.compile(p)
510 except:
511 self.ui.warn("ignoring invalid ignore"
512 + " regular expression '%s'\n" % p)
513 else:
514 bigpat.append(util.pconvert(pat[:-1]))
508 515 except IOError: pass
509 516 if bigpat:
510 517 s = "(?:%s)" % (")|(?:".join(bigpat))
511 518 r = re.compile(s)
512 519 self.ignorefunc = r.search
513 520 else:
514 521 self.ignorefunc = lambda x: False
515 522
516 523 return self.ignorefunc(f)
517 524
518 525 def hook(self, name, **args):
519 526 s = self.ui.config("hooks", name)
520 527 if s:
521 528 self.ui.note("running hook %s: %s\n" % (name, s))
522 529 old = {}
523 530 for k, v in args.items():
524 531 k = k.upper()
525 532 old[k] = os.environ.get(k, None)
526 533 os.environ[k] = v
527 534
528 535 r = os.system(s)
529 536
530 537 for k, v in old.items():
531 538 if v != None:
532 539 os.environ[k] = v
533 540 else:
534 541 del os.environ[k]
535 542
536 543 if r:
537 544 self.ui.warn("abort: %s hook failed with status %d!\n" %
538 545 (name, r))
539 546 return False
540 547 return True
541 548
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # record tag k -> binary node, tolerating bad node strings
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.revision(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                pass

            try:
                # local (uncommitted) tags override revision-controlled ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always defined
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
579 586
580 587 def tagslist(self):
581 588 '''return a list of tags ordered by revision'''
582 589 l = []
583 590 for t, n in self.tags().items():
584 591 try:
585 592 r = self.changelog.rev(n)
586 593 except:
587 594 r = -2 # sort to the beginning of the list if unknown
588 595 l.append((r,t,n))
589 596 l.sort()
590 597 return [(t,n) for r,t,n in l]
591 598
592 599 def nodetags(self, node):
593 600 '''return the tags associated with a node'''
594 601 if not self.nodetagscache:
595 602 self.nodetagscache = {}
596 603 for t,n in self.tags().items():
597 604 self.nodetagscache.setdefault(n,[]).append(t)
598 605 return self.nodetagscache.get(node, [])
599 606
600 607 def lookup(self, key):
601 608 try:
602 609 return self.tags()[key]
603 610 except KeyError:
604 611 return self.changelog.lookup(key)
605 612
606 613 def dev(self):
607 614 if self.remote: return -1
608 615 return os.stat(self.path).st_dev
609 616
    def join(self, f):
        """Return the path of f inside the .hg directory."""
        return os.path.join(self.path, f)
612 619
    def wjoin(self, f):
        """Return the path of f inside the working directory."""
        return os.path.join(self.root, f)
615 622
    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/': f = f[1:]
        return filelog(self.opener, f)
619 626
    def getcwd(self):
        """Return the cwd relative to the repo root ('' when at the root)."""
        cwd = os.getcwd()
        if cwd == self.root: return ''
        return cwd[len(self.root) + 1:]
624 631
    def wfile(self, f, mode='r'):
        """Open file f from the working directory."""
        return self.wopener(f, mode)
627 634
    def transaction(self):
        """Start a journaled transaction.

        Also snapshots the current dirstate to undo.dirstate so that a
        later undo() can restore it.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("undo.dirstate", "w").write(ds)

        return transaction.transaction(self.ui.warn,
                                       self.opener, self.join("journal"),
                                       self.join("undo"))
639 646
640 647 def recover(self):
641 648 lock = self.lock()
642 649 if os.path.exists(self.join("journal")):
643 650 self.ui.status("rolling back interrupted transaction\n")
644 651 return transaction.rollback(self.opener, self.join("journal"))
645 652 else:
646 653 self.ui.warn("no interrupted transaction available\n")
647 654
648 655 def undo(self):
649 656 lock = self.lock()
650 657 if os.path.exists(self.join("undo")):
651 658 self.ui.status("rolling back last transaction\n")
652 659 transaction.rollback(self.opener, self.join("undo"))
653 660 self.dirstate = None
654 661 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
655 662 self.dirstate = dirstate(self.opener, self.ui, self.root)
656 663 else:
657 664 self.ui.warn("no undo information available\n")
658 665
659 666 def lock(self, wait = 1):
660 667 try:
661 668 return lock.lock(self.join("lock"), 0)
662 669 except lock.LockHeld, inst:
663 670 if wait:
664 671 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
665 672 return lock.lock(self.join("lock"), wait)
666 673 raise inst
667 674
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit the given working-directory files directly, bypassing
        the usual status checks.  Parents default to the dirstate
        parents; the dirstate is only updated when p1 matches its first
        parent."""
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # only touch the dirstate when committing onto its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wfile(f).read()
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm
                mm[f] = r.add(t, {}, tr, linkrev,
                              m1.get(f, nullid), m2.get(f, nullid))
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable working file: treat it as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
713 720
714 721 def commit(self, files = None, text = "", user = None, date = None):
715 722 commit = []
716 723 remove = []
717 724 if files:
718 725 for f in files:
719 726 s = self.dirstate.state(f)
720 727 if s in 'nmai':
721 728 commit.append(f)
722 729 elif s == 'r':
723 730 remove.append(f)
724 731 else:
725 732 self.ui.warn("%s not tracked!\n" % f)
726 733 else:
727 734 (c, a, d, u) = self.changes(None, None)
728 735 commit = c + a
729 736 remove = d
730 737
731 738 if not commit and not remove:
732 739 self.ui.status("nothing changed\n")
733 740 return
734 741
735 742 if not self.hook("precommit"):
736 743 return 1
737 744
738 745 p1, p2 = self.dirstate.parents()
739 746 c1 = self.changelog.read(p1)
740 747 c2 = self.changelog.read(p2)
741 748 m1 = self.manifest.read(c1[0])
742 749 mf1 = self.manifest.readflags(c1[0])
743 750 m2 = self.manifest.read(c2[0])
744 751 lock = self.lock()
745 752 tr = self.transaction()
746 753
747 754 # check in files
748 755 new = {}
749 756 linkrev = self.changelog.count()
750 757 commit.sort()
751 758 for f in commit:
752 759 self.ui.note(f + "\n")
753 760 try:
754 761 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
755 762 t = self.wfile(f).read()
756 763 except IOError:
757 764 self.warn("trouble committing %s!\n" % f)
758 765 raise
759 766
760 767 meta = {}
761 768 cp = self.dirstate.copied(f)
762 769 if cp:
763 770 meta["copy"] = cp
764 771 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
765 772 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
766 773
767 774 r = self.file(f)
768 775 fp1 = m1.get(f, nullid)
769 776 fp2 = m2.get(f, nullid)
770 777 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
771 778
772 779 # update manifest
773 780 m1.update(new)
774 781 for f in remove:
775 782 if f in m1:
776 783 del m1[f]
777 784 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0], (new,remove))
778 785
779 786 # add changeset
780 787 new = new.keys()
781 788 new.sort()
782 789
783 790 if not text:
784 791 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
785 792 edittext += "".join(["HG: changed %s\n" % f for f in new])
786 793 edittext += "".join(["HG: removed %s\n" % f for f in remove])
787 794 edittext = self.ui.edit(edittext)
788 795 if not edittext.rstrip():
789 796 return 1
790 797 text = edittext
791 798
792 799 user = user or self.ui.username()
793 800 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
794 801
795 802 if not self.hook("commit", node=hex(n)):
796 803 return 1
797 804
798 805 tr.close()
799 806
800 807 self.dirstate.setparents(n)
801 808 self.dirstate.update(new, "n")
802 809 self.dirstate.forget(remove)
803 810
    def changes(self, node1, node2, files=None):
        """Report differences between two revisions; a None node means
        the working directory.  Returns (changed, added, deleted,
        unknown), each a sorted list of file names."""
        mf2, u = None, []

        def fcmp(fn, mf):
            # full content comparison: working copy vs stored revision
            t1 = self.wfile(fn).read()
            t2 = self.file(fn).revision(mf[fn])
            return cmp(t1, t2)

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, self.ignore)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = self.manifest.read(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = self.manifest.read(change[0]).copy()
            for f in a + c + l:
                # "" marks a working-dir file with no stored node yet
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = self.manifest.read(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = self.manifest.read(change[0]).copy()

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    # differing nodes; for working-dir entries ("") fall
                    # back to content comparison
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever remains in mf1 is not in mf2: deleted
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
866 873
867 874 def add(self, list):
868 875 for f in list:
869 876 p = self.wjoin(f)
870 877 if not os.path.exists(p):
871 878 self.ui.warn("%s does not exist!\n" % f)
872 879 elif not os.path.isfile(p):
873 880 self.ui.warn("%s not added: mercurial only supports files currently\n" % f)
874 881 elif self.dirstate.state(f) == 'n':
875 882 self.ui.warn("%s already tracked!\n" % f)
876 883 else:
877 884 self.dirstate.update([f], "a")
878 885
879 886 def forget(self, list):
880 887 for f in list:
881 888 if self.dirstate.state(f) not in 'ai':
882 889 self.ui.warn("%s not added!\n" % f)
883 890 else:
884 891 self.dirstate.forget([f])
885 892
886 893 def remove(self, list):
887 894 for f in list:
888 895 p = self.wjoin(f)
889 896 if os.path.exists(p):
890 897 self.ui.warn("%s still exists!\n" % f)
891 898 elif self.dirstate.state(f) == 'a':
892 899 self.ui.warn("%s never committed!\n" % f)
893 900 self.dirstate.forget(f)
894 901 elif f not in self.dirstate:
895 902 self.ui.warn("%s not tracked!\n" % f)
896 903 else:
897 904 self.dirstate.update([f], "r")
898 905
899 906 def copy(self, source, dest):
900 907 p = self.wjoin(dest)
901 908 if not os.path.exists(dest):
902 909 self.ui.warn("%s does not exist!\n" % dest)
903 910 elif not os.path.isfile(dest):
904 911 self.ui.warn("copy failed: %s is not a file\n" % dest)
905 912 else:
906 913 if self.dirstate.state(dest) == '?':
907 914 self.dirstate.update([dest], "a")
908 915 self.dirstate.copy(source, dest)
909 916
    def heads(self):
        """Return the list of changelog head nodes."""
        return self.changelog.heads()
912 919
913 920 def branches(self, nodes):
914 921 if not nodes: nodes = [self.changelog.tip()]
915 922 b = []
916 923 for n in nodes:
917 924 t = n
918 925 while n:
919 926 p = self.changelog.parents(n)
920 927 if p[1] != nullid or p[0] == nullid:
921 928 b.append((t, n, p[0], p[1]))
922 929 break
923 930 n = p[0]
924 931 return b
925 932
    def between(self, pairs):
        """For each (top, bottom) pair, walk first parents from top down
        to bottom and return the nodes sampled at exponentially growing
        distances (1, 2, 4, ...).  Used by the discovery binary search."""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1  # distance at which to take the next sample

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)   # sample this node
                    f = f * 2     # double the gap to the next sample
                n = p
                i += 1

            r.append(l)

        return r
944 951
    def newer(self, nodes):
        """Return all changelog nodes descended from any of `nodes`
        (the given nodes included)."""
        m = {}   # explicitly listed nodes
        nl = []  # result, in revision order
        pm = {}  # nodes known to be in the descendant set
        cl = self.changelog
        t = l = cl.count()

        # find the lowest numbered node
        for n in nodes:
            l = min(l, cl.rev(n))
            m[n] = 1

        # scan forward from there; revision order guarantees a node's
        # parents are classified before the node itself
        for i in xrange(l, t):
            n = cl.node(i)
            if n in m: # explicitly listed
                pm[n] = 1
                nl.append(n)
                continue
            for p in cl.parents(n):
                if p in pm: # parent listed
                    pm[n] = 1
                    nl.append(n)
                    break

        return nl
970 977
971 978 def findincoming(self, remote, base={}):
972 979 m = self.changelog.nodemap
973 980 search = []
974 981 fetch = []
975 982 seen = {}
976 983 seenbranch = {}
977 984
978 985 # assume we're closer to the tip than the root
979 986 # and start by examining the heads
980 987 self.ui.status("searching for changes\n")
981 988 heads = remote.heads()
982 989 unknown = []
983 990 for h in heads:
984 991 if h not in m:
985 992 unknown.append(h)
986 993 else:
987 994 base[h] = 1
988 995
989 996 if not unknown:
990 997 return None
991 998
992 999 rep = {}
993 1000 reqcnt = 0
994 1001
995 1002 # search through remote branches
996 1003 # a 'branch' here is a linear segment of history, with four parts:
997 1004 # head, root, first parent, second parent
998 1005 # (a branch always has two parents (or none) by definition)
999 1006 unknown = remote.branches(unknown)
1000 1007 while unknown:
1001 1008 r = []
1002 1009 while unknown:
1003 1010 n = unknown.pop(0)
1004 1011 if n[0] in seen:
1005 1012 continue
1006 1013
1007 1014 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1008 1015 if n[0] == nullid:
1009 1016 break
1010 1017 if n in seenbranch:
1011 1018 self.ui.debug("branch already found\n")
1012 1019 continue
1013 1020 if n[1] and n[1] in m: # do we know the base?
1014 1021 self.ui.debug("found incomplete branch %s:%s\n"
1015 1022 % (short(n[0]), short(n[1])))
1016 1023 search.append(n) # schedule branch range for scanning
1017 1024 seenbranch[n] = 1
1018 1025 else:
1019 1026 if n[1] not in seen and n[1] not in fetch:
1020 1027 if n[2] in m and n[3] in m:
1021 1028 self.ui.debug("found new changeset %s\n" %
1022 1029 short(n[1]))
1023 1030 fetch.append(n[1]) # earliest unknown
1024 1031 base[n[2]] = 1 # latest known
1025 1032 continue
1026 1033
1027 1034 for a in n[2:4]:
1028 1035 if a not in rep:
1029 1036 r.append(a)
1030 1037 rep[a] = 1
1031 1038
1032 1039 seen[n[0]] = 1
1033 1040
1034 1041 if r:
1035 1042 reqcnt += 1
1036 1043 self.ui.debug("request %d: %s\n" %
1037 1044 (reqcnt, " ".join(map(short, r))))
1038 1045 for p in range(0, len(r), 10):
1039 1046 for b in remote.branches(r[p:p+10]):
1040 1047 self.ui.debug("received %s:%s\n" %
1041 1048 (short(b[0]), short(b[1])))
1042 1049 if b[0] not in m and b[0] not in seen:
1043 1050 unknown.append(b)
1044 1051
1045 1052 # do binary search on the branches we found
1046 1053 while search:
1047 1054 n = search.pop(0)
1048 1055 reqcnt += 1
1049 1056 l = remote.between([(n[0], n[1])])[0]
1050 1057 l.append(n[1])
1051 1058 p = n[0]
1052 1059 f = 1
1053 1060 for i in l:
1054 1061 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1055 1062 if i in m:
1056 1063 if f <= 2:
1057 1064 self.ui.debug("found new branch changeset %s\n" %
1058 1065 short(p))
1059 1066 fetch.append(p)
1060 1067 base[i] = 1
1061 1068 else:
1062 1069 self.ui.debug("narrowed branch search to %s:%s\n"
1063 1070 % (short(p), short(i)))
1064 1071 search.append((p, i))
1065 1072 break
1066 1073 p, f = i, f * 2
1067 1074
1068 1075 # sanity check our fetch list
1069 1076 for f in fetch:
1070 1077 if f in m:
1071 1078 raise RepoError("already have changeset " + short(f[:4]))
1072 1079
1073 1080 if base.keys() == [nullid]:
1074 1081 self.ui.warn("warning: pulling from an unrelated repository!\n")
1075 1082
1076 1083 self.ui.note("adding new changesets starting at " +
1077 1084 " ".join([short(f) for f in fetch]) + "\n")
1078 1085
1079 1086 self.ui.debug("%d total queries\n" % reqcnt)
1080 1087
1081 1088 return fetch
1082 1089
1083 1090 def findoutgoing(self, remote):
1084 1091 base = {}
1085 1092 self.findincoming(remote, base)
1086 1093 remain = dict.fromkeys(self.changelog.nodemap)
1087 1094
1088 1095 # prune everything remote has from the tree
1089 1096 del remain[nullid]
1090 1097 remove = base.keys()
1091 1098 while remove:
1092 1099 n = remove.pop(0)
1093 1100 if n in remain:
1094 1101 del remain[n]
1095 1102 for p in self.changelog.parents(n):
1096 1103 remove.append(p)
1097 1104
1098 1105 # find every node whose parents have been pruned
1099 1106 subset = []
1100 1107 for n in remain:
1101 1108 p1, p2 = self.changelog.parents(n)
1102 1109 if p1 not in remain and p2 not in remain:
1103 1110 subset.append(n)
1104 1111
1105 1112 # this is the set of all roots we have to push
1106 1113 return subset
1107 1114
1108 1115 def pull(self, remote):
1109 1116 lock = self.lock()
1110 1117
1111 1118 # if we have an empty repo, fetch everything
1112 1119 if self.changelog.tip() == nullid:
1113 1120 self.ui.status("requesting all changes\n")
1114 1121 fetch = [nullid]
1115 1122 else:
1116 1123 fetch = self.findincoming(remote)
1117 1124
1118 1125 if not fetch:
1119 1126 self.ui.status("no changes found\n")
1120 1127 return 1
1121 1128
1122 1129 cg = remote.changegroup(fetch)
1123 1130 return self.addchangegroup(cg)
1124 1131
1125 1132 def push(self, remote):
1126 1133 lock = remote.lock()
1127 1134 update = self.findoutgoing(remote)
1128 1135 if not update:
1129 1136 self.ui.status("no changes found\n")
1130 1137 return 1
1131 1138
1132 1139 cg = self.changegroup(update)
1133 1140 return remote.addchangegroup(cg)
1134 1141
1135 1142 def changegroup(self, basenodes):
1136 1143 class genread:
1137 1144 def __init__(self, generator):
1138 1145 self.g = generator
1139 1146 self.buf = ""
1140 1147 def read(self, l):
1141 1148 while l > len(self.buf):
1142 1149 try:
1143 1150 self.buf += self.g.next()
1144 1151 except StopIteration:
1145 1152 break
1146 1153 d, self.buf = self.buf[:l], self.buf[l:]
1147 1154 return d
1148 1155
1149 1156 def gengroup():
1150 1157 nodes = self.newer(basenodes)
1151 1158
1152 1159 # construct the link map
1153 1160 linkmap = {}
1154 1161 for n in nodes:
1155 1162 linkmap[self.changelog.rev(n)] = n
1156 1163
1157 1164 # construct a list of all changed files
1158 1165 changed = {}
1159 1166 for n in nodes:
1160 1167 c = self.changelog.read(n)
1161 1168 for f in c[3]:
1162 1169 changed[f] = 1
1163 1170 changed = changed.keys()
1164 1171 changed.sort()
1165 1172
1166 1173 # the changegroup is changesets + manifests + all file revs
1167 1174 revs = [ self.changelog.rev(n) for n in nodes ]
1168 1175
1169 1176 for y in self.changelog.group(linkmap): yield y
1170 1177 for y in self.manifest.group(linkmap): yield y
1171 1178 for f in changed:
1172 1179 yield struct.pack(">l", len(f) + 4) + f
1173 1180 g = self.file(f).group(linkmap)
1174 1181 for y in g:
1175 1182 yield y
1176 1183
1177 1184 yield struct.pack(">l", 0)
1178 1185
1179 1186 return genread(gengroup())
1180 1187
1181 1188 def addchangegroup(self, source):
1182 1189
1183 1190 def getchunk():
1184 1191 d = source.read(4)
1185 1192 if not d: return ""
1186 1193 l = struct.unpack(">l", d)[0]
1187 1194 if l <= 4: return ""
1188 1195 return source.read(l - 4)
1189 1196
1190 1197 def getgroup():
1191 1198 while 1:
1192 1199 c = getchunk()
1193 1200 if not c: break
1194 1201 yield c
1195 1202
1196 1203 def csmap(x):
1197 1204 self.ui.debug("add changeset %s\n" % short(x))
1198 1205 return self.changelog.count()
1199 1206
1200 1207 def revmap(x):
1201 1208 return self.changelog.rev(x)
1202 1209
1203 1210 if not source: return
1204 1211 changesets = files = revisions = 0
1205 1212
1206 1213 tr = self.transaction()
1207 1214
1208 1215 # pull off the changeset group
1209 1216 self.ui.status("adding changesets\n")
1210 1217 co = self.changelog.tip()
1211 1218 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1212 1219 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1213 1220
1214 1221 # pull off the manifest group
1215 1222 self.ui.status("adding manifests\n")
1216 1223 mm = self.manifest.tip()
1217 1224 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1218 1225
1219 1226 # process the files
1220 1227 self.ui.status("adding file revisions\n")
1221 1228 while 1:
1222 1229 f = getchunk()
1223 1230 if not f: break
1224 1231 self.ui.debug("adding %s revisions\n" % f)
1225 1232 fl = self.file(f)
1226 1233 o = fl.count()
1227 1234 n = fl.addgroup(getgroup(), revmap, tr)
1228 1235 revisions += fl.count() - o
1229 1236 files += 1
1230 1237
1231 1238 self.ui.status(("modified %d files, added %d changesets" +
1232 1239 " and %d new revisions\n")
1233 1240 % (files, changesets, revisions))
1234 1241
1235 1242 tr.close()
1236 1243 return
1237 1244
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """update the working directory to the given changeset

        Compares the working directory, its parent manifest, the target
        manifest and their common ancestor to decide, per file, whether
        to fetch the target version, schedule a 3-way merge, remove the
        file, or leave it alone.

        node - target changeset
        allow - permit an update that spans branches (a merge)
        force - clobber local changes and conflicts
        choose - optional predicate restricting which files are touched
        moddirstate - whether to record the result in the dirstate
        """
        # refuse to update with an uncommitted merge outstanding
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return

        # read the manifests (and exec-flag maps) of both sides and of
        # their common ancestor
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # changed, added, deleted, unknown files in the working dir
        (c, a, d, u) = self.changes(None, None)

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        # per-file verdicts collected by the comparison below
        merge = {}   # files needing a 3-way merge
        get = {}     # files to fetch from the target
        remove = []  # files to delete
        mark = {}    # files to mark in the dirstate

        # construct a working dir manifest: the parent manifest plus
        # everything added/changed/unknown, minus deletions
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wfile(f).read()
                    t2 = self.file(f).revision(m2[f])
                    if cmp(t1, t2) == 0:
                        mark[f] = 1
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                    else:
                        mark[f] = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                # reconcile executable bits when file contents agreed
                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                            mark[f] = 1
                del m2[f]
            elif f in ma:
                # file exists locally and in the ancestor but the
                # target deleted it
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                # file exists only on our side
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # files remaining in m2 exist only on the target side
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                self.ui.debug("local deleted %s\n" % f)
                if force:
                    get[f] = n

        del mw, m1, m2, ma

        # with force, conflicts are resolved by taking the remote side
        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path:
            # we don't need to do any magic, just jump to the new rev
            mode = 'n'
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to perform a branch merge)\n")
                return 1
            # we have to remember what files we needed to get/change
            # because any file that's different from either one of its
            # parents must be in the changeset
            mode = 'm'
            if moddirstate:
                self.dirstate.update(mark.keys(), "m")

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wfile(f, "w").write(t)
            except IOError:
                # missing parent directory; create it and retry
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wfile(f, "w").write(t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                self.dirstate.update([f], mode)

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            m, o, flag = merge[f]
            self.merge3(f, m, o)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                self.dirstate.update([f], 'm')

        for f in remove:
            self.ui.note("removing %s\n" % f)
            os.unlink(f)
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(f))
            except: pass
        if moddirstate:
            if mode == 'n':
                self.dirstate.forget(remove)
            else:
                self.dirstate.update(remove, 'r')
1460 1467
1461 1468 def merge3(self, fn, my, other):
1462 1469 """perform a 3-way merge in the working directory"""
1463 1470
1464 1471 def temp(prefix, node):
1465 1472 pre = "%s~%s." % (os.path.basename(fn), prefix)
1466 1473 (fd, name) = tempfile.mkstemp("", pre)
1467 1474 f = os.fdopen(fd, "wb")
1468 1475 f.write(fl.revision(node))
1469 1476 f.close()
1470 1477 return name
1471 1478
1472 1479 fl = self.file(fn)
1473 1480 base = fl.ancestor(my, other)
1474 1481 a = self.wjoin(fn)
1475 1482 b = temp("base", base)
1476 1483 c = temp("other", other)
1477 1484
1478 1485 self.ui.note("resolving %s\n" % fn)
1479 1486 self.ui.debug("file %s: other %s ancestor %s\n" %
1480 1487 (fn, short(other), short(base)))
1481 1488
1482 1489 cmd = self.ui.config("ui", "merge") or \
1483 1490 os.environ.get("HGMERGE", "hgmerge")
1484 1491 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1485 1492 if r:
1486 1493 self.ui.warn("merging %s failed!\n" % fn)
1487 1494
1488 1495 os.unlink(b)
1489 1496 os.unlink(c)
1490 1497
1491 1498 def verify(self):
1492 1499 filelinkrevs = {}
1493 1500 filenodes = {}
1494 1501 changesets = revisions = files = 0
1495 1502 errors = 0
1496 1503
1497 1504 seen = {}
1498 1505 self.ui.status("checking changesets\n")
1499 1506 for i in range(self.changelog.count()):
1500 1507 changesets += 1
1501 1508 n = self.changelog.node(i)
1502 1509 if n in seen:
1503 1510 self.ui.warn("duplicate changeset at revision %d\n" % i)
1504 1511 errors += 1
1505 1512 seen[n] = 1
1506 1513
1507 1514 for p in self.changelog.parents(n):
1508 1515 if p not in self.changelog.nodemap:
1509 1516 self.ui.warn("changeset %s has unknown parent %s\n" %
1510 1517 (short(n), short(p)))
1511 1518 errors += 1
1512 1519 try:
1513 1520 changes = self.changelog.read(n)
1514 1521 except Exception, inst:
1515 1522 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1516 1523 errors += 1
1517 1524
1518 1525 for f in changes[3]:
1519 1526 filelinkrevs.setdefault(f, []).append(i)
1520 1527
1521 1528 seen = {}
1522 1529 self.ui.status("checking manifests\n")
1523 1530 for i in range(self.manifest.count()):
1524 1531 n = self.manifest.node(i)
1525 1532 if n in seen:
1526 1533 self.ui.warn("duplicate manifest at revision %d\n" % i)
1527 1534 errors += 1
1528 1535 seen[n] = 1
1529 1536
1530 1537 for p in self.manifest.parents(n):
1531 1538 if p not in self.manifest.nodemap:
1532 1539 self.ui.warn("manifest %s has unknown parent %s\n" %
1533 1540 (short(n), short(p)))
1534 1541 errors += 1
1535 1542
1536 1543 try:
1537 1544 delta = mdiff.patchtext(self.manifest.delta(n))
1538 1545 except KeyboardInterrupt:
1539 1546 self.ui.warn("aborted")
1540 1547 sys.exit(0)
1541 1548 except Exception, inst:
1542 1549 self.ui.warn("unpacking manifest %s: %s\n"
1543 1550 % (short(n), inst))
1544 1551 errors += 1
1545 1552
1546 1553 ff = [ l.split('\0') for l in delta.splitlines() ]
1547 1554 for f, fn in ff:
1548 1555 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1549 1556
1550 1557 self.ui.status("crosschecking files in changesets and manifests\n")
1551 1558 for f in filenodes:
1552 1559 if f not in filelinkrevs:
1553 1560 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1554 1561 errors += 1
1555 1562
1556 1563 for f in filelinkrevs:
1557 1564 if f not in filenodes:
1558 1565 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1559 1566 errors += 1
1560 1567
1561 1568 self.ui.status("checking files\n")
1562 1569 ff = filenodes.keys()
1563 1570 ff.sort()
1564 1571 for f in ff:
1565 1572 if f == "/dev/null": continue
1566 1573 files += 1
1567 1574 fl = self.file(f)
1568 1575 nodes = { nullid: 1 }
1569 1576 seen = {}
1570 1577 for i in range(fl.count()):
1571 1578 revisions += 1
1572 1579 n = fl.node(i)
1573 1580
1574 1581 if n in seen:
1575 1582 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1576 1583 errors += 1
1577 1584
1578 1585 if n not in filenodes[f]:
1579 1586 self.ui.warn("%s: %d:%s not in manifests\n"
1580 1587 % (f, i, short(n)))
1581 1588 errors += 1
1582 1589 else:
1583 1590 del filenodes[f][n]
1584 1591
1585 1592 flr = fl.linkrev(n)
1586 1593 if flr not in filelinkrevs[f]:
1587 1594 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1588 1595 % (f, short(n), fl.linkrev(n)))
1589 1596 errors += 1
1590 1597 else:
1591 1598 filelinkrevs[f].remove(flr)
1592 1599
1593 1600 # verify contents
1594 1601 try:
1595 1602 t = fl.read(n)
1596 1603 except Exception, inst:
1597 1604 self.ui.warn("unpacking file %s %s: %s\n"
1598 1605 % (f, short(n), inst))
1599 1606 errors += 1
1600 1607
1601 1608 # verify parents
1602 1609 (p1, p2) = fl.parents(n)
1603 1610 if p1 not in nodes:
1604 1611 self.ui.warn("file %s:%s unknown parent 1 %s" %
1605 1612 (f, short(n), short(p1)))
1606 1613 errors += 1
1607 1614 if p2 not in nodes:
1608 1615 self.ui.warn("file %s:%s unknown parent 2 %s" %
1609 1616 (f, short(n), short(p1)))
1610 1617 errors += 1
1611 1618 nodes[n] = 1
1612 1619
1613 1620 # cross-check
1614 1621 for node in filenodes[f]:
1615 1622 self.ui.warn("node %s in manifests not in %s\n"
1616 1623 % (hex(n), f))
1617 1624 errors += 1
1618 1625
1619 1626 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1620 1627 (files, changesets, revisions))
1621 1628
1622 1629 if errors:
1623 1630 self.ui.warn("%d integrity errors encountered!\n" % errors)
1624 1631 return 1
1625 1632
class httprepository:
    """read-only repository accessed over HTTP (hgweb protocol)"""

    def __init__(self, ui, path):
        self.url = path
        self.ui = ui
        no_list = [ "localhost", "127.0.0.1" ]
        host = ui.config("http_proxy", "host")
        if host is None:
            host = os.environ.get("http_proxy")
        if host and host.startswith('http://'):
            host = host[7:]
        user = ui.config("http_proxy", "user")
        passwd = ui.config("http_proxy", "passwd")
        no = ui.config("http_proxy", "no")
        if no is None:
            no = os.environ.get("no_proxy")
        if no:
            no_list = no_list + no.split(",")

        # bypass the proxy when the target host is on the no-proxy list
        no_proxy = 0
        for h in no_list:
            if (path.startswith("http://" + h + "/") or
                path.startswith("http://" + h + ":") or
                path == "http://" + h):
                no_proxy = 1

        # Note: urllib2 takes proxy values from the environment and those will
        # take precedence
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            if os.environ.has_key(env):
                del os.environ[env]

        proxy_handler = urllib2.BaseHandler()
        if host and not no_proxy:
            proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})

        authinfo = None
        if user and passwd:
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            authinfo = urllib2.ProxyBasicAuthHandler(passmgr)

        opener = urllib2.build_opener(proxy_handler, authinfo)
        urllib2.install_opener(opener)

    def dev(self):
        # not backed by a local filesystem
        return -1

    def do_cmd(self, cmd, **args):
        """issue a protocol command as an HTTP request, return the response"""
        self.ui.debug("sending %s command\n" % cmd)
        q = {"cmd": cmd}
        q.update(args)
        qs = urllib.urlencode(q)
        cu = "%s?%s" % (self.url, qs)
        return urllib2.urlopen(cu)

    def heads(self):
        d = self.do_cmd("heads").read()
        try:
            return map(bin, d[:-1].split(" "))
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def branches(self, nodes):
        n = " ".join(map(hex, nodes))
        d = self.do_cmd("branches", nodes=n).read()
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def between(self, pairs):
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.do_cmd("between", pairs=n).read()
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def changegroup(self, nodes):
        """return a file-like object yielding the decompressed changegroup"""
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)

        class zread:
            # wrap the compressed response in a file-like object that
            # decompresses on the fly
            def __init__(self, f):
                self.zd = zlib.decompressobj()
                self.f = f
                self.buf = ""
            def read(self, l):
                while l > len(self.buf):
                    # fix: read from self.f instead of reaching into the
                    # enclosing scope's 'f' (same stream, but the stored
                    # attribute was dead weight before)
                    r = self.f.read(4096)
                    if r:
                        self.buf += self.zd.decompress(r)
                    else:
                        # EOF: flush whatever the decompressor holds
                        self.buf += self.zd.flush()
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        return zread(f)
1731 1738
class remotelock:
    """a lock held on a remote repository

    Released explicitly via release(), or automatically when the
    object is garbage collected.
    """
    def __init__(self, repo):
        self.repo = repo
    def release(self):
        # ask the remote side to unlock, then drop our reference so
        # __del__ will not release a second time
        self.repo.unlock()
        self.repo = None
    def __del__(self):
        # auto-release if the caller forgot to
        if self.repo:
            self.release()
1741 1748
class sshrepository:
    """repository accessed over ssh via 'hg serve --stdio'"""

    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
        if not m:
            raise RepoError("couldn't parse destination %s\n" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7)

        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s") % (args, self.port) or args
        path = self.path or ""

        cmd = "ssh %s 'hg -R %s serve --stdio'"
        cmd = cmd % (args, path)

        # pipeo: remote stdin, pipei: remote stdout, pipee: remote stderr
        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        # drain anything the remote wrote to stderr without blocking
        while 1:
            r,w,x = select.select([self.pipee], [], [], 0)
            if not r: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status("remote: ", l)

    def __del__(self):
        self.pipeo.close()
        self.pipei.close()
        # read the error descriptor until EOF
        for l in self.pipee:
            self.ui.status("remote: ", l)
        self.pipee.close()

    def dev(self):
        # not backed by a local filesystem
        return -1

    def do_cmd(self, cmd, **args):
        """send a command with length-prefixed args, return the reply pipe"""
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """run a command and return its length-prefixed response body"""
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        # send the command; the raw changegroup follows on the input pipe
        n = " ".join(map(hex, nodes))
        self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        """push a changegroup stream to the remote repository"""
        d = self.call("addchangegroup")
        if d:
            # bugfix: interpolate the refusal reason into the message
            # instead of passing it as a stray second exception argument
            raise RepoError("push refused: %s" % d)

        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
1856 1863
def repository(ui, path=None, create=0):
    """return a repository object appropriate for the given path

    http:// and hg:// yield an httprepository, ssh:// an
    sshrepository, old-http:// a localrepository served statically;
    anything else (including no path at all) is a plain local
    repository, honoring 'create'.
    """
    if not path:
        return localrepository(ui, path, create)

    if path.startswith("http://"):
        return httprepository(ui, path)
    if path.startswith("hg://"):
        return httprepository(ui, path.replace("hg://", "http://"))
    if path.startswith("old-http://"):
        return localrepository(ui, path.replace("old-http://", "http://"))
    if path.startswith("ssh://"):
        return sshrepository(ui, path)

    return localrepository(ui, path, create)
General Comments 0
You need to be logged in to leave comments. Login now