##// END OF EJS Templates
Performance enhancements for manifest.add()...
mason@suse.com -
r644:6ebe1182 default
parent child Browse files
Show More
@@ -1,1744 +1,1844 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import sys, struct, os
8 import sys, struct, os
9 import util
9 import util
10 from revlog import *
10 from revlog import *
11 from demandload import *
11 from demandload import *
12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 demandload(globals(), "tempfile httprangereader bdiff")
13 demandload(globals(), "tempfile httprangereader bdiff")
14 demandload(globals(), "bisect")
14
15
15 class filelog(revlog):
16 class filelog(revlog):
16 def __init__(self, opener, path):
17 def __init__(self, opener, path):
17 revlog.__init__(self, opener,
18 revlog.__init__(self, opener,
18 os.path.join("data", path + ".i"),
19 os.path.join("data", path + ".i"),
19 os.path.join("data", path + ".d"))
20 os.path.join("data", path + ".d"))
20
21
21 def read(self, node):
22 def read(self, node):
22 t = self.revision(node)
23 t = self.revision(node)
23 if t[:2] != '\1\n':
24 if t[:2] != '\1\n':
24 return t
25 return t
25 s = t.find('\1\n', 2)
26 s = t.find('\1\n', 2)
26 return t[s+2:]
27 return t[s+2:]
27
28
28 def readmeta(self, node):
29 def readmeta(self, node):
29 t = self.revision(node)
30 t = self.revision(node)
30 if t[:2] != '\1\n':
31 if t[:2] != '\1\n':
31 return t
32 return t
32 s = t.find('\1\n', 2)
33 s = t.find('\1\n', 2)
33 mt = t[2:s]
34 mt = t[2:s]
34 for l in mt.splitlines():
35 for l in mt.splitlines():
35 k, v = l.split(": ", 1)
36 k, v = l.split(": ", 1)
36 m[k] = v
37 m[k] = v
37 return m
38 return m
38
39
39 def add(self, text, meta, transaction, link, p1=None, p2=None):
40 def add(self, text, meta, transaction, link, p1=None, p2=None):
40 if meta or text[:2] == '\1\n':
41 if meta or text[:2] == '\1\n':
41 mt = ""
42 mt = ""
42 if meta:
43 if meta:
43 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
44 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
44 text = "\1\n" + "".join(mt) + "\1\n" + text
45 text = "\1\n" + "".join(mt) + "\1\n" + text
45 return self.addrevision(text, transaction, link, p1, p2)
46 return self.addrevision(text, transaction, link, p1, p2)
46
47
47 def annotate(self, node):
48 def annotate(self, node):
48
49
49 def decorate(text, rev):
50 def decorate(text, rev):
50 return ([rev] * len(text.splitlines()), text)
51 return ([rev] * len(text.splitlines()), text)
51
52
52 def pair(parent, child):
53 def pair(parent, child):
53 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
54 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
54 child[0][b1:b2] = parent[0][a1:a2]
55 child[0][b1:b2] = parent[0][a1:a2]
55 return child
56 return child
56
57
57 # find all ancestors
58 # find all ancestors
58 needed = {node:1}
59 needed = {node:1}
59 visit = [node]
60 visit = [node]
60 while visit:
61 while visit:
61 n = visit.pop(0)
62 n = visit.pop(0)
62 for p in self.parents(n):
63 for p in self.parents(n):
63 if p not in needed:
64 if p not in needed:
64 needed[p] = 1
65 needed[p] = 1
65 visit.append(p)
66 visit.append(p)
66 else:
67 else:
67 # count how many times we'll use this
68 # count how many times we'll use this
68 needed[p] += 1
69 needed[p] += 1
69
70
70 # sort by revision which is a topological order
71 # sort by revision which is a topological order
71 visit = [ (self.rev(n), n) for n in needed.keys() ]
72 visit = [ (self.rev(n), n) for n in needed.keys() ]
72 visit.sort()
73 visit.sort()
73 hist = {}
74 hist = {}
74
75
75 for r,n in visit:
76 for r,n in visit:
76 curr = decorate(self.read(n), self.linkrev(n))
77 curr = decorate(self.read(n), self.linkrev(n))
77 for p in self.parents(n):
78 for p in self.parents(n):
78 if p != nullid:
79 if p != nullid:
79 curr = pair(hist[p], curr)
80 curr = pair(hist[p], curr)
80 # trim the history of unneeded revs
81 # trim the history of unneeded revs
81 needed[p] -= 1
82 needed[p] -= 1
82 if not needed[p]:
83 if not needed[p]:
83 del hist[p]
84 del hist[p]
84 hist[n] = curr
85 hist[n] = curr
85
86
86 return zip(hist[n][0], hist[n][1].splitlines(1))
87 return zip(hist[n][0], hist[n][1].splitlines(1))
87
88
88 class manifest(revlog):
89 class manifest(revlog):
89 def __init__(self, opener):
90 def __init__(self, opener):
90 self.mapcache = None
91 self.mapcache = None
91 self.listcache = None
92 self.listcache = None
92 self.addlist = None
93 self.addlist = None
93 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
94 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
94
95
95 def read(self, node):
96 def read(self, node):
96 if node == nullid: return {} # don't upset local cache
97 if node == nullid: return {} # don't upset local cache
97 if self.mapcache and self.mapcache[0] == node:
98 if self.mapcache and self.mapcache[0] == node:
98 return self.mapcache[1]
99 return self.mapcache[1]
99 text = self.revision(node)
100 text = self.revision(node)
100 map = {}
101 map = {}
101 flag = {}
102 flag = {}
102 self.listcache = (text, text.splitlines(1))
103 self.listcache = (text, text.splitlines(1))
103 for l in self.listcache[1]:
104 for l in self.listcache[1]:
104 (f, n) = l.split('\0')
105 (f, n) = l.split('\0')
105 map[f] = bin(n[:40])
106 map[f] = bin(n[:40])
106 flag[f] = (n[40:-1] == "x")
107 flag[f] = (n[40:-1] == "x")
107 self.mapcache = (node, map, flag)
108 self.mapcache = (node, map, flag)
108 return map
109 return map
109
110
110 def readflags(self, node):
111 def readflags(self, node):
111 if node == nullid: return {} # don't upset local cache
112 if node == nullid: return {} # don't upset local cache
112 if not self.mapcache or self.mapcache[0] != node:
113 if not self.mapcache or self.mapcache[0] != node:
113 self.read(node)
114 self.read(node)
114 return self.mapcache[2]
115 return self.mapcache[2]
115
116
116 def diff(self, a, b):
117 def diff(self, a, b):
117 # this is sneaky, as we're not actually using a and b
118 # this is sneaky, as we're not actually using a and b
118 if self.listcache and self.addlist and self.listcache[0] == a:
119 if self.listcache and self.addlist and self.listcache[0] == a:
119 d = mdiff.diff(self.listcache[1], self.addlist, 1)
120 d = mdiff.diff(self.listcache[1], self.addlist, 1)
120 if mdiff.patch(a, d) != b:
121 if mdiff.patch(a, d) != b:
121 sys.stderr.write("*** sortdiff failed, falling back ***\n")
122 sys.stderr.write("*** sortdiff failed, falling back ***\n")
122 return mdiff.textdiff(a, b)
123 return mdiff.textdiff(a, b)
123 return d
124 return d
124 else:
125 else:
125 return mdiff.textdiff(a, b)
126 return mdiff.textdiff(a, b)
126
127
127 def add(self, map, flags, transaction, link, p1=None, p2=None):
128 def add(self, map, flags, transaction, link, p1=None, p2=None,changed=None):
128 files = map.keys()
129 # directly generate the mdiff delta from the data collected during
129 files.sort()
130 # the bisect loop below
131 def gendelta(delta):
132 i = 0
133 result = []
134 while i < len(delta):
135 start = delta[i][2]
136 end = delta[i][3]
137 l = delta[i][4]
138 if l == None:
139 l = ""
140 while i < len(delta) - 1 and start <= delta[i+1][2] and end >= delta[i+1][2]:
141 if delta[i+1][3] > end:
142 end = delta[i+1][3]
143 if delta[i+1][4]:
144 l += delta[i+1][4]
145 i += 1
146 result.append(struct.pack(">lll", start, end, len(l)) + l)
147 i += 1
148 return result
149
150 # apply the changes collected during the bisect loop to our addlist
151 def addlistdelta(addlist, delta):
152 # apply the deltas to the addlist. start from the bottom up
153 # so changes to the offsets don't mess things up.
154 i = len(delta)
155 while i > 0:
156 i -= 1
157 start = delta[i][0]
158 end = delta[i][1]
159 if delta[i][4]:
160 addlist[start:end] = [delta[i][4]]
161 else:
162 del addlist[start:end]
163 return addlist
164
165 # calculate the byte offset of the start of each line in the
166 # manifest
167 def calcoffsets(addlist):
168 offsets = [0] * (len(addlist) + 1)
169 offset = 0
170 i = 0
171 while i < len(addlist):
172 offsets[i] = offset
173 offset += len(addlist[i])
174 i += 1
175 offsets[i] = offset
176 return offsets
130
177
131 self.addlist = ["%s\000%s%s\n" %
178 # if we're using the listcache, make sure it is valid and
132 (f, hex(map[f]), flags[f] and "x" or '')
179 # parented by the same node we're diffing against
133 for f in files]
180 if not changed or not self.listcache or not p1 or self.mapcache[0] != p1:
181 files = map.keys()
182 files.sort()
183
184 self.addlist = ["%s\000%s%s\n" %
185 (f, hex(map[f]), flags[f] and "x" or '')
186 for f in files]
187 cachedelta = None
188 else:
189 addlist = self.listcache[1]
190
191 # find the starting offset for each line in the add list
192 offsets = calcoffsets(addlist)
193
194 # combine the changed lists into one list for sorting
195 work = [[x, 0] for x in changed[0]]
196 work[len(work):] = [[x, 1] for x in changed[1]]
197 work.sort()
198
199 delta = []
200 bs = 0
201
202 for w in work:
203 f = w[0]
204 # bs will either be the index of the item or the insertion point
205 bs = bisect.bisect(addlist, f, bs)
206 if bs < len(addlist):
207 fn = addlist[bs][:addlist[bs].index('\0')]
208 else:
209 fn = None
210 if w[1] == 0:
211 l = "%s\000%s%s\n" % (f, hex(map[f]), flags[f] and "x" or '')
212 else:
213 l = None
214 start = bs
215 if fn != f:
216 # item not found, insert a new one
217 end = bs
218 if w[1] == 1:
219 sys.stderr.write("failed to remove %s from manifest" % f)
220 sys.exit(1)
221 else:
222 # item is found, replace/delete the existing line
223 end = bs + 1
224 delta.append([start, end, offsets[start], offsets[end], l])
225
226 self.addlist = addlistdelta(addlist, delta)
227 if self.mapcache[0] == self.tip():
228 cachedelta = "".join(gendelta(delta))
229 else:
230 cachedelta = None
231
134 text = "".join(self.addlist)
232 text = "".join(self.addlist)
135
233 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
136 n = self.addrevision(text, transaction, link, p1, p2)
234 sys.stderr.write("manifest delta failure")
235 sys.exit(1)
236 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
137 self.mapcache = (n, map, flags)
237 self.mapcache = (n, map, flags)
138 self.listcache = (text, self.addlist)
238 self.listcache = (text, self.addlist)
139 self.addlist = None
239 self.addlist = None
140
240
141 return n
241 return n
142
242
143 class changelog(revlog):
243 class changelog(revlog):
144 def __init__(self, opener):
244 def __init__(self, opener):
145 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
245 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
146
246
147 def extract(self, text):
247 def extract(self, text):
148 if not text:
248 if not text:
149 return (nullid, "", "0", [], "")
249 return (nullid, "", "0", [], "")
150 last = text.index("\n\n")
250 last = text.index("\n\n")
151 desc = text[last + 2:]
251 desc = text[last + 2:]
152 l = text[:last].splitlines()
252 l = text[:last].splitlines()
153 manifest = bin(l[0])
253 manifest = bin(l[0])
154 user = l[1]
254 user = l[1]
155 date = l[2]
255 date = l[2]
156 files = l[3:]
256 files = l[3:]
157 return (manifest, user, date, files, desc)
257 return (manifest, user, date, files, desc)
158
258
159 def read(self, node):
259 def read(self, node):
160 return self.extract(self.revision(node))
260 return self.extract(self.revision(node))
161
261
162 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
262 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
163 user=None, date=None):
263 user=None, date=None):
164 date = date or "%d %d" % (time.time(), time.timezone)
264 date = date or "%d %d" % (time.time(), time.timezone)
165 list.sort()
265 list.sort()
166 l = [hex(manifest), user, date] + list + ["", desc]
266 l = [hex(manifest), user, date] + list + ["", desc]
167 text = "\n".join(l)
267 text = "\n".join(l)
168 return self.addrevision(text, transaction, self.count(), p1, p2)
268 return self.addrevision(text, transaction, self.count(), p1, p2)
169
269
170 class dirstate:
270 class dirstate:
171 def __init__(self, opener, ui, root):
271 def __init__(self, opener, ui, root):
172 self.opener = opener
272 self.opener = opener
173 self.root = root
273 self.root = root
174 self.dirty = 0
274 self.dirty = 0
175 self.ui = ui
275 self.ui = ui
176 self.map = None
276 self.map = None
177 self.pl = None
277 self.pl = None
178 self.copies = {}
278 self.copies = {}
179
279
180 def __del__(self):
280 def __del__(self):
181 if self.dirty:
281 if self.dirty:
182 self.write()
282 self.write()
183
283
184 def __getitem__(self, key):
284 def __getitem__(self, key):
185 try:
285 try:
186 return self.map[key]
286 return self.map[key]
187 except TypeError:
287 except TypeError:
188 self.read()
288 self.read()
189 return self[key]
289 return self[key]
190
290
191 def __contains__(self, key):
291 def __contains__(self, key):
192 if not self.map: self.read()
292 if not self.map: self.read()
193 return key in self.map
293 return key in self.map
194
294
195 def parents(self):
295 def parents(self):
196 if not self.pl:
296 if not self.pl:
197 self.read()
297 self.read()
198 return self.pl
298 return self.pl
199
299
200 def setparents(self, p1, p2 = nullid):
300 def setparents(self, p1, p2 = nullid):
201 self.dirty = 1
301 self.dirty = 1
202 self.pl = p1, p2
302 self.pl = p1, p2
203
303
204 def state(self, key):
304 def state(self, key):
205 try:
305 try:
206 return self[key][0]
306 return self[key][0]
207 except KeyError:
307 except KeyError:
208 return "?"
308 return "?"
209
309
210 def read(self):
310 def read(self):
211 if self.map is not None: return self.map
311 if self.map is not None: return self.map
212
312
213 self.map = {}
313 self.map = {}
214 self.pl = [nullid, nullid]
314 self.pl = [nullid, nullid]
215 try:
315 try:
216 st = self.opener("dirstate").read()
316 st = self.opener("dirstate").read()
217 if not st: return
317 if not st: return
218 except: return
318 except: return
219
319
220 self.pl = [st[:20], st[20: 40]]
320 self.pl = [st[:20], st[20: 40]]
221
321
222 pos = 40
322 pos = 40
223 while pos < len(st):
323 while pos < len(st):
224 e = struct.unpack(">cllll", st[pos:pos+17])
324 e = struct.unpack(">cllll", st[pos:pos+17])
225 l = e[4]
325 l = e[4]
226 pos += 17
326 pos += 17
227 f = st[pos:pos + l]
327 f = st[pos:pos + l]
228 if '\0' in f:
328 if '\0' in f:
229 f, c = f.split('\0')
329 f, c = f.split('\0')
230 self.copies[f] = c
330 self.copies[f] = c
231 self.map[f] = e[:4]
331 self.map[f] = e[:4]
232 pos += l
332 pos += l
233
333
234 def copy(self, source, dest):
334 def copy(self, source, dest):
235 self.read()
335 self.read()
236 self.dirty = 1
336 self.dirty = 1
237 self.copies[dest] = source
337 self.copies[dest] = source
238
338
239 def copied(self, file):
339 def copied(self, file):
240 return self.copies.get(file, None)
340 return self.copies.get(file, None)
241
341
242 def update(self, files, state):
342 def update(self, files, state):
243 ''' current states:
343 ''' current states:
244 n normal
344 n normal
245 m needs merging
345 m needs merging
246 r marked for removal
346 r marked for removal
247 a marked for addition'''
347 a marked for addition'''
248
348
249 if not files: return
349 if not files: return
250 self.read()
350 self.read()
251 self.dirty = 1
351 self.dirty = 1
252 for f in files:
352 for f in files:
253 if state == "r":
353 if state == "r":
254 self.map[f] = ('r', 0, 0, 0)
354 self.map[f] = ('r', 0, 0, 0)
255 else:
355 else:
256 s = os.stat(os.path.join(self.root, f))
356 s = os.stat(os.path.join(self.root, f))
257 self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)
357 self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)
258
358
259 def forget(self, files):
359 def forget(self, files):
260 if not files: return
360 if not files: return
261 self.read()
361 self.read()
262 self.dirty = 1
362 self.dirty = 1
263 for f in files:
363 for f in files:
264 try:
364 try:
265 del self.map[f]
365 del self.map[f]
266 except KeyError:
366 except KeyError:
267 self.ui.warn("not in dirstate: %s!\n" % f)
367 self.ui.warn("not in dirstate: %s!\n" % f)
268 pass
368 pass
269
369
270 def clear(self):
370 def clear(self):
271 self.map = {}
371 self.map = {}
272 self.dirty = 1
372 self.dirty = 1
273
373
274 def write(self):
374 def write(self):
275 st = self.opener("dirstate", "w")
375 st = self.opener("dirstate", "w")
276 st.write("".join(self.pl))
376 st.write("".join(self.pl))
277 for f, e in self.map.items():
377 for f, e in self.map.items():
278 c = self.copied(f)
378 c = self.copied(f)
279 if c:
379 if c:
280 f = f + "\0" + c
380 f = f + "\0" + c
281 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
381 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
282 st.write(e + f)
382 st.write(e + f)
283 self.dirty = 0
383 self.dirty = 0
284
384
285 def changes(self, files, ignore):
385 def changes(self, files, ignore):
286 self.read()
386 self.read()
287 dc = self.map.copy()
387 dc = self.map.copy()
288 lookup, changed, added, unknown = [], [], [], []
388 lookup, changed, added, unknown = [], [], [], []
289
389
290 # compare all files by default
390 # compare all files by default
291 if not files: files = [self.root]
391 if not files: files = [self.root]
292
392
293 # recursive generator of all files listed
393 # recursive generator of all files listed
294 def walk(files):
394 def walk(files):
295 for f in util.unique(files):
395 for f in util.unique(files):
296 f = os.path.join(self.root, f)
396 f = os.path.join(self.root, f)
297 if os.path.isdir(f):
397 if os.path.isdir(f):
298 for dir, subdirs, fl in os.walk(f):
398 for dir, subdirs, fl in os.walk(f):
299 d = dir[len(self.root) + 1:]
399 d = dir[len(self.root) + 1:]
300 if ".hg" in subdirs: subdirs.remove(".hg")
400 if ".hg" in subdirs: subdirs.remove(".hg")
301 for fn in fl:
401 for fn in fl:
302 fn = util.pconvert(os.path.join(d, fn))
402 fn = util.pconvert(os.path.join(d, fn))
303 yield fn
403 yield fn
304 else:
404 else:
305 yield f[len(self.root) + 1:]
405 yield f[len(self.root) + 1:]
306
406
307 for fn in util.unique(walk(files)):
407 for fn in util.unique(walk(files)):
308 try: s = os.stat(os.path.join(self.root, fn))
408 try: s = os.stat(os.path.join(self.root, fn))
309 except: continue
409 except: continue
310
410
311 if fn in dc:
411 if fn in dc:
312 c = dc[fn]
412 c = dc[fn]
313 del dc[fn]
413 del dc[fn]
314
414
315 if c[0] == 'm':
415 if c[0] == 'm':
316 changed.append(fn)
416 changed.append(fn)
317 elif c[0] == 'a':
417 elif c[0] == 'a':
318 added.append(fn)
418 added.append(fn)
319 elif c[0] == 'r':
419 elif c[0] == 'r':
320 unknown.append(fn)
420 unknown.append(fn)
321 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
421 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
322 changed.append(fn)
422 changed.append(fn)
323 elif c[1] != s.st_mode or c[3] != s.st_mtime:
423 elif c[1] != s.st_mode or c[3] != s.st_mtime:
324 lookup.append(fn)
424 lookup.append(fn)
325 else:
425 else:
326 if not ignore(fn): unknown.append(fn)
426 if not ignore(fn): unknown.append(fn)
327
427
328 return (lookup, changed, added, dc.keys(), unknown)
428 return (lookup, changed, added, dc.keys(), unknown)
329
429
330 # used to avoid circular references so destructors work
430 # used to avoid circular references so destructors work
331 def opener(base):
431 def opener(base):
332 p = base
432 p = base
333 def o(path, mode="r"):
433 def o(path, mode="r"):
334 if p[:7] == "http://":
434 if p[:7] == "http://":
335 f = os.path.join(p, urllib.quote(path))
435 f = os.path.join(p, urllib.quote(path))
336 return httprangereader.httprangereader(f)
436 return httprangereader.httprangereader(f)
337
437
338 f = os.path.join(p, path)
438 f = os.path.join(p, path)
339
439
340 mode += "b" # for that other OS
440 mode += "b" # for that other OS
341
441
342 if mode[0] != "r":
442 if mode[0] != "r":
343 try:
443 try:
344 s = os.stat(f)
444 s = os.stat(f)
345 except OSError:
445 except OSError:
346 d = os.path.dirname(f)
446 d = os.path.dirname(f)
347 if not os.path.isdir(d):
447 if not os.path.isdir(d):
348 os.makedirs(d)
448 os.makedirs(d)
349 else:
449 else:
350 if s.st_nlink > 1:
450 if s.st_nlink > 1:
351 file(f + ".tmp", "wb").write(file(f, "rb").read())
451 file(f + ".tmp", "wb").write(file(f, "rb").read())
352 util.rename(f+".tmp", f)
452 util.rename(f+".tmp", f)
353
453
354 return file(f, mode)
454 return file(f, mode)
355
455
356 return o
456 return o
357
457
358 class RepoError(Exception): pass
458 class RepoError(Exception): pass
359
459
360 class localrepository:
460 class localrepository:
361 def __init__(self, ui, path=None, create=0):
461 def __init__(self, ui, path=None, create=0):
362 self.remote = 0
462 self.remote = 0
363 if path and path[:7] == "http://":
463 if path and path[:7] == "http://":
364 self.remote = 1
464 self.remote = 1
365 self.path = path
465 self.path = path
366 else:
466 else:
367 if not path:
467 if not path:
368 p = os.getcwd()
468 p = os.getcwd()
369 while not os.path.isdir(os.path.join(p, ".hg")):
469 while not os.path.isdir(os.path.join(p, ".hg")):
370 oldp = p
470 oldp = p
371 p = os.path.dirname(p)
471 p = os.path.dirname(p)
372 if p == oldp: raise RepoError("no repo found")
472 if p == oldp: raise RepoError("no repo found")
373 path = p
473 path = p
374 self.path = os.path.join(path, ".hg")
474 self.path = os.path.join(path, ".hg")
375
475
376 if not create and not os.path.isdir(self.path):
476 if not create and not os.path.isdir(self.path):
377 raise RepoError("repository %s not found" % self.path)
477 raise RepoError("repository %s not found" % self.path)
378
478
379 self.root = path
479 self.root = path
380 self.ui = ui
480 self.ui = ui
381
481
382 if create:
482 if create:
383 os.mkdir(self.path)
483 os.mkdir(self.path)
384 os.mkdir(self.join("data"))
484 os.mkdir(self.join("data"))
385
485
386 self.opener = opener(self.path)
486 self.opener = opener(self.path)
387 self.wopener = opener(self.root)
487 self.wopener = opener(self.root)
388 self.manifest = manifest(self.opener)
488 self.manifest = manifest(self.opener)
389 self.changelog = changelog(self.opener)
489 self.changelog = changelog(self.opener)
390 self.ignorelist = None
490 self.ignorelist = None
391 self.tagscache = None
491 self.tagscache = None
392 self.nodetagscache = None
492 self.nodetagscache = None
393
493
394 if not self.remote:
494 if not self.remote:
395 self.dirstate = dirstate(self.opener, ui, self.root)
495 self.dirstate = dirstate(self.opener, ui, self.root)
396 try:
496 try:
397 self.ui.readconfig(self.opener("hgrc"))
497 self.ui.readconfig(self.opener("hgrc"))
398 except IOError: pass
498 except IOError: pass
399
499
400 def ignore(self, f):
500 def ignore(self, f):
401 if self.ignorelist is None:
501 if self.ignorelist is None:
402 self.ignorelist = []
502 self.ignorelist = []
403 try:
503 try:
404 l = file(self.wjoin(".hgignore"))
504 l = file(self.wjoin(".hgignore"))
405 for pat in l:
505 for pat in l:
406 if pat != "\n":
506 if pat != "\n":
407 self.ignorelist.append(re.compile(util.pconvert(pat[:-1])))
507 self.ignorelist.append(re.compile(util.pconvert(pat[:-1])))
408 except IOError: pass
508 except IOError: pass
409 for pat in self.ignorelist:
509 for pat in self.ignorelist:
410 if pat.search(f): return True
510 if pat.search(f): return True
411 return False
511 return False
412
512
413 def hook(self, name, **args):
513 def hook(self, name, **args):
414 s = self.ui.config("hooks", name)
514 s = self.ui.config("hooks", name)
415 if s:
515 if s:
416 self.ui.note("running hook %s: %s\n" % (name, s))
516 self.ui.note("running hook %s: %s\n" % (name, s))
417 old = {}
517 old = {}
418 for k, v in args.items():
518 for k, v in args.items():
419 k = k.upper()
519 k = k.upper()
420 old[k] = os.environ.get(k, None)
520 old[k] = os.environ.get(k, None)
421 os.environ[k] = v
521 os.environ[k] = v
422
522
423 r = os.system(s)
523 r = os.system(s)
424
524
425 for k, v in old.items():
525 for k, v in old.items():
426 if v != None:
526 if v != None:
427 os.environ[k] = v
527 os.environ[k] = v
428 else:
528 else:
429 del os.environ[k]
529 del os.environ[k]
430
530
431 if r:
531 if r:
432 self.ui.warn("abort: %s hook failed with status %d!\n" %
532 self.ui.warn("abort: %s hook failed with status %d!\n" %
433 (name, r))
533 (name, r))
434 return False
534 return False
435 return True
535 return True
436
536
437 def tags(self):
537 def tags(self):
438 '''return a mapping of tag to node'''
538 '''return a mapping of tag to node'''
439 if not self.tagscache:
539 if not self.tagscache:
440 self.tagscache = {}
540 self.tagscache = {}
441 def addtag(self, k, n):
541 def addtag(self, k, n):
442 try:
542 try:
443 bin_n = bin(n)
543 bin_n = bin(n)
444 except TypeError:
544 except TypeError:
445 bin_n = ''
545 bin_n = ''
446 self.tagscache[k.strip()] = bin_n
546 self.tagscache[k.strip()] = bin_n
447
547
448 try:
548 try:
449 # read each head of the tags file, ending with the tip
549 # read each head of the tags file, ending with the tip
450 # and add each tag found to the map, with "newer" ones
550 # and add each tag found to the map, with "newer" ones
451 # taking precedence
551 # taking precedence
452 fl = self.file(".hgtags")
552 fl = self.file(".hgtags")
453 h = fl.heads()
553 h = fl.heads()
454 h.reverse()
554 h.reverse()
455 for r in h:
555 for r in h:
456 for l in fl.revision(r).splitlines():
556 for l in fl.revision(r).splitlines():
457 if l:
557 if l:
458 n, k = l.split(" ", 1)
558 n, k = l.split(" ", 1)
459 addtag(self, k, n)
559 addtag(self, k, n)
460 except KeyError:
560 except KeyError:
461 pass
561 pass
462
562
463 try:
563 try:
464 f = self.opener("localtags")
564 f = self.opener("localtags")
465 for l in f:
565 for l in f:
466 n, k = l.split(" ", 1)
566 n, k = l.split(" ", 1)
467 addtag(self, k, n)
567 addtag(self, k, n)
468 except IOError:
568 except IOError:
469 pass
569 pass
470
570
471 self.tagscache['tip'] = self.changelog.tip()
571 self.tagscache['tip'] = self.changelog.tip()
472
572
473 return self.tagscache
573 return self.tagscache
474
574
475 def tagslist(self):
575 def tagslist(self):
476 '''return a list of tags ordered by revision'''
576 '''return a list of tags ordered by revision'''
477 l = []
577 l = []
478 for t, n in self.tags().items():
578 for t, n in self.tags().items():
479 try:
579 try:
480 r = self.changelog.rev(n)
580 r = self.changelog.rev(n)
481 except:
581 except:
482 r = -2 # sort to the beginning of the list if unknown
582 r = -2 # sort to the beginning of the list if unknown
483 l.append((r,t,n))
583 l.append((r,t,n))
484 l.sort()
584 l.sort()
485 return [(t,n) for r,t,n in l]
585 return [(t,n) for r,t,n in l]
486
586
487 def nodetags(self, node):
587 def nodetags(self, node):
488 '''return the tags associated with a node'''
588 '''return the tags associated with a node'''
489 if not self.nodetagscache:
589 if not self.nodetagscache:
490 self.nodetagscache = {}
590 self.nodetagscache = {}
491 for t,n in self.tags().items():
591 for t,n in self.tags().items():
492 self.nodetagscache.setdefault(n,[]).append(t)
592 self.nodetagscache.setdefault(n,[]).append(t)
493 return self.nodetagscache.get(node, [])
593 return self.nodetagscache.get(node, [])
494
594
495 def lookup(self, key):
595 def lookup(self, key):
496 try:
596 try:
497 return self.tags()[key]
597 return self.tags()[key]
498 except KeyError:
598 except KeyError:
499 return self.changelog.lookup(key)
599 return self.changelog.lookup(key)
500
600
501 def dev(self):
601 def dev(self):
502 if self.remote: return -1
602 if self.remote: return -1
503 return os.stat(self.path).st_dev
603 return os.stat(self.path).st_dev
504
604
505 def join(self, f):
605 def join(self, f):
506 return os.path.join(self.path, f)
606 return os.path.join(self.path, f)
507
607
508 def wjoin(self, f):
608 def wjoin(self, f):
509 return os.path.join(self.root, f)
609 return os.path.join(self.root, f)
510
610
511 def file(self, f):
611 def file(self, f):
512 if f[0] == '/': f = f[1:]
612 if f[0] == '/': f = f[1:]
513 return filelog(self.opener, f)
613 return filelog(self.opener, f)
514
614
515 def getcwd(self):
615 def getcwd(self):
516 cwd = os.getcwd()
616 cwd = os.getcwd()
517 if cwd == self.root: return ''
617 if cwd == self.root: return ''
518 return cwd[len(self.root) + 1:]
618 return cwd[len(self.root) + 1:]
519
619
520 def wfile(self, f, mode='r'):
620 def wfile(self, f, mode='r'):
521 return self.wopener(f, mode)
621 return self.wopener(f, mode)
522
622
523 def transaction(self):
623 def transaction(self):
524 # save dirstate for undo
624 # save dirstate for undo
525 try:
625 try:
526 ds = self.opener("dirstate").read()
626 ds = self.opener("dirstate").read()
527 except IOError:
627 except IOError:
528 ds = ""
628 ds = ""
529 self.opener("undo.dirstate", "w").write(ds)
629 self.opener("undo.dirstate", "w").write(ds)
530
630
531 return transaction.transaction(self.ui.warn,
631 return transaction.transaction(self.ui.warn,
532 self.opener, self.join("journal"),
632 self.opener, self.join("journal"),
533 self.join("undo"))
633 self.join("undo"))
534
634
535 def recover(self):
635 def recover(self):
536 lock = self.lock()
636 lock = self.lock()
537 if os.path.exists(self.join("journal")):
637 if os.path.exists(self.join("journal")):
538 self.ui.status("rolling back interrupted transaction\n")
638 self.ui.status("rolling back interrupted transaction\n")
539 return transaction.rollback(self.opener, self.join("journal"))
639 return transaction.rollback(self.opener, self.join("journal"))
540 else:
640 else:
541 self.ui.warn("no interrupted transaction available\n")
641 self.ui.warn("no interrupted transaction available\n")
542
642
543 def undo(self):
643 def undo(self):
544 lock = self.lock()
644 lock = self.lock()
545 if os.path.exists(self.join("undo")):
645 if os.path.exists(self.join("undo")):
546 self.ui.status("rolling back last transaction\n")
646 self.ui.status("rolling back last transaction\n")
547 transaction.rollback(self.opener, self.join("undo"))
647 transaction.rollback(self.opener, self.join("undo"))
548 self.dirstate = None
648 self.dirstate = None
549 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
649 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
550 self.dirstate = dirstate(self.opener, self.ui, self.root)
650 self.dirstate = dirstate(self.opener, self.ui, self.root)
551 else:
651 else:
552 self.ui.warn("no undo information available\n")
652 self.ui.warn("no undo information available\n")
553
653
554 def lock(self, wait = 1):
654 def lock(self, wait = 1):
555 try:
655 try:
556 return lock.lock(self.join("lock"), 0)
656 return lock.lock(self.join("lock"), 0)
557 except lock.LockHeld, inst:
657 except lock.LockHeld, inst:
558 if wait:
658 if wait:
559 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
659 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
560 return lock.lock(self.join("lock"), wait)
660 return lock.lock(self.join("lock"), wait)
561 raise inst
661 raise inst
562
662
563 def rawcommit(self, files, text, user, date, p1=None, p2=None):
663 def rawcommit(self, files, text, user, date, p1=None, p2=None):
564 orig_parent = self.dirstate.parents()[0] or nullid
664 orig_parent = self.dirstate.parents()[0] or nullid
565 p1 = p1 or self.dirstate.parents()[0] or nullid
665 p1 = p1 or self.dirstate.parents()[0] or nullid
566 p2 = p2 or self.dirstate.parents()[1] or nullid
666 p2 = p2 or self.dirstate.parents()[1] or nullid
567 c1 = self.changelog.read(p1)
667 c1 = self.changelog.read(p1)
568 c2 = self.changelog.read(p2)
668 c2 = self.changelog.read(p2)
569 m1 = self.manifest.read(c1[0])
669 m1 = self.manifest.read(c1[0])
570 mf1 = self.manifest.readflags(c1[0])
670 mf1 = self.manifest.readflags(c1[0])
571 m2 = self.manifest.read(c2[0])
671 m2 = self.manifest.read(c2[0])
572
672
573 if orig_parent == p1:
673 if orig_parent == p1:
574 update_dirstate = 1
674 update_dirstate = 1
575 else:
675 else:
576 update_dirstate = 0
676 update_dirstate = 0
577
677
578 tr = self.transaction()
678 tr = self.transaction()
579 mm = m1.copy()
679 mm = m1.copy()
580 mfm = mf1.copy()
680 mfm = mf1.copy()
581 linkrev = self.changelog.count()
681 linkrev = self.changelog.count()
582 for f in files:
682 for f in files:
583 try:
683 try:
584 t = self.wfile(f).read()
684 t = self.wfile(f).read()
585 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
685 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
586 r = self.file(f)
686 r = self.file(f)
587 mfm[f] = tm
687 mfm[f] = tm
588 mm[f] = r.add(t, {}, tr, linkrev,
688 mm[f] = r.add(t, {}, tr, linkrev,
589 m1.get(f, nullid), m2.get(f, nullid))
689 m1.get(f, nullid), m2.get(f, nullid))
590 if update_dirstate:
690 if update_dirstate:
591 self.dirstate.update([f], "n")
691 self.dirstate.update([f], "n")
592 except IOError:
692 except IOError:
593 try:
693 try:
594 del mm[f]
694 del mm[f]
595 del mfm[f]
695 del mfm[f]
596 if update_dirstate:
696 if update_dirstate:
597 self.dirstate.forget([f])
697 self.dirstate.forget([f])
598 except:
698 except:
599 # deleted from p2?
699 # deleted from p2?
600 pass
700 pass
601
701
602 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
702 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
603 user = user or self.ui.username()
703 user = user or self.ui.username()
604 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
704 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
605 tr.close()
705 tr.close()
606 if update_dirstate:
706 if update_dirstate:
607 self.dirstate.setparents(n, nullid)
707 self.dirstate.setparents(n, nullid)
608
708
609 def commit(self, files = None, text = "", user = None, date = None):
709 def commit(self, files = None, text = "", user = None, date = None):
610 commit = []
710 commit = []
611 remove = []
711 remove = []
612 if files:
712 if files:
613 for f in files:
713 for f in files:
614 s = self.dirstate.state(f)
714 s = self.dirstate.state(f)
615 if s in 'nmai':
715 if s in 'nmai':
616 commit.append(f)
716 commit.append(f)
617 elif s == 'r':
717 elif s == 'r':
618 remove.append(f)
718 remove.append(f)
619 else:
719 else:
620 self.ui.warn("%s not tracked!\n" % f)
720 self.ui.warn("%s not tracked!\n" % f)
621 else:
721 else:
622 (c, a, d, u) = self.changes(None, None)
722 (c, a, d, u) = self.changes(None, None)
623 commit = c + a
723 commit = c + a
624 remove = d
724 remove = d
625
725
626 if not commit and not remove:
726 if not commit and not remove:
627 self.ui.status("nothing changed\n")
727 self.ui.status("nothing changed\n")
628 return
728 return
629
729
630 if not self.hook("precommit"):
730 if not self.hook("precommit"):
631 return 1
731 return 1
632
732
633 p1, p2 = self.dirstate.parents()
733 p1, p2 = self.dirstate.parents()
634 c1 = self.changelog.read(p1)
734 c1 = self.changelog.read(p1)
635 c2 = self.changelog.read(p2)
735 c2 = self.changelog.read(p2)
636 m1 = self.manifest.read(c1[0])
736 m1 = self.manifest.read(c1[0])
637 mf1 = self.manifest.readflags(c1[0])
737 mf1 = self.manifest.readflags(c1[0])
638 m2 = self.manifest.read(c2[0])
738 m2 = self.manifest.read(c2[0])
639 lock = self.lock()
739 lock = self.lock()
640 tr = self.transaction()
740 tr = self.transaction()
641
741
642 # check in files
742 # check in files
643 new = {}
743 new = {}
644 linkrev = self.changelog.count()
744 linkrev = self.changelog.count()
645 commit.sort()
745 commit.sort()
646 for f in commit:
746 for f in commit:
647 self.ui.note(f + "\n")
747 self.ui.note(f + "\n")
648 try:
748 try:
649 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
749 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
650 t = self.wfile(f).read()
750 t = self.wfile(f).read()
651 except IOError:
751 except IOError:
652 self.warn("trouble committing %s!\n" % f)
752 self.warn("trouble committing %s!\n" % f)
653 raise
753 raise
654
754
655 meta = {}
755 meta = {}
656 cp = self.dirstate.copied(f)
756 cp = self.dirstate.copied(f)
657 if cp:
757 if cp:
658 meta["copy"] = cp
758 meta["copy"] = cp
659 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
759 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
660 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
760 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
661
761
662 r = self.file(f)
762 r = self.file(f)
663 fp1 = m1.get(f, nullid)
763 fp1 = m1.get(f, nullid)
664 fp2 = m2.get(f, nullid)
764 fp2 = m2.get(f, nullid)
665 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
765 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
666
766
667 # update manifest
767 # update manifest
668 m1.update(new)
768 m1.update(new)
669 for f in remove:
769 for f in remove:
670 if f in m1:
770 if f in m1:
671 del m1[f]
771 del m1[f]
672 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0])
772 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0], (new,remove))
673
773
674 # add changeset
774 # add changeset
675 new = new.keys()
775 new = new.keys()
676 new.sort()
776 new.sort()
677
777
678 if not text:
778 if not text:
679 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
779 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
680 edittext += "".join(["HG: changed %s\n" % f for f in new])
780 edittext += "".join(["HG: changed %s\n" % f for f in new])
681 edittext += "".join(["HG: removed %s\n" % f for f in remove])
781 edittext += "".join(["HG: removed %s\n" % f for f in remove])
682 edittext = self.ui.edit(edittext)
782 edittext = self.ui.edit(edittext)
683 if not edittext.rstrip():
783 if not edittext.rstrip():
684 return 1
784 return 1
685 text = edittext
785 text = edittext
686
786
687 user = user or self.ui.username()
787 user = user or self.ui.username()
688 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
788 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
689
789
690 if not self.hook("commit", node=hex(n)):
790 if not self.hook("commit", node=hex(n)):
691 return 1
791 return 1
692
792
693 tr.close()
793 tr.close()
694
794
695 self.dirstate.setparents(n)
795 self.dirstate.setparents(n)
696 self.dirstate.update(new, "n")
796 self.dirstate.update(new, "n")
697 self.dirstate.forget(remove)
797 self.dirstate.forget(remove)
698
798
699 def changes(self, node1, node2, files=None):
799 def changes(self, node1, node2, files=None):
700 mf2, u = None, []
800 mf2, u = None, []
701
801
702 def fcmp(fn, mf):
802 def fcmp(fn, mf):
703 t1 = self.wfile(fn).read()
803 t1 = self.wfile(fn).read()
704 t2 = self.file(fn).revision(mf[fn])
804 t2 = self.file(fn).revision(mf[fn])
705 return cmp(t1, t2)
805 return cmp(t1, t2)
706
806
707 # are we comparing the working directory?
807 # are we comparing the working directory?
708 if not node2:
808 if not node2:
709 l, c, a, d, u = self.dirstate.changes(files, self.ignore)
809 l, c, a, d, u = self.dirstate.changes(files, self.ignore)
710
810
711 # are we comparing working dir against its parent?
811 # are we comparing working dir against its parent?
712 if not node1:
812 if not node1:
713 if l:
813 if l:
714 # do a full compare of any files that might have changed
814 # do a full compare of any files that might have changed
715 change = self.changelog.read(self.dirstate.parents()[0])
815 change = self.changelog.read(self.dirstate.parents()[0])
716 mf2 = self.manifest.read(change[0])
816 mf2 = self.manifest.read(change[0])
717 for f in l:
817 for f in l:
718 if fcmp(f, mf2):
818 if fcmp(f, mf2):
719 c.append(f)
819 c.append(f)
720
820
721 for l in c, a, d, u:
821 for l in c, a, d, u:
722 l.sort()
822 l.sort()
723
823
724 return (c, a, d, u)
824 return (c, a, d, u)
725
825
726 # are we comparing working dir against non-tip?
826 # are we comparing working dir against non-tip?
727 # generate a pseudo-manifest for the working dir
827 # generate a pseudo-manifest for the working dir
728 if not node2:
828 if not node2:
729 if not mf2:
829 if not mf2:
730 change = self.changelog.read(self.dirstate.parents()[0])
830 change = self.changelog.read(self.dirstate.parents()[0])
731 mf2 = self.manifest.read(change[0]).copy()
831 mf2 = self.manifest.read(change[0]).copy()
732 for f in a + c + l:
832 for f in a + c + l:
733 mf2[f] = ""
833 mf2[f] = ""
734 for f in d:
834 for f in d:
735 if f in mf2: del mf2[f]
835 if f in mf2: del mf2[f]
736 else:
836 else:
737 change = self.changelog.read(node2)
837 change = self.changelog.read(node2)
738 mf2 = self.manifest.read(change[0])
838 mf2 = self.manifest.read(change[0])
739
839
740 # flush lists from dirstate before comparing manifests
840 # flush lists from dirstate before comparing manifests
741 c, a = [], []
841 c, a = [], []
742
842
743 change = self.changelog.read(node1)
843 change = self.changelog.read(node1)
744 mf1 = self.manifest.read(change[0]).copy()
844 mf1 = self.manifest.read(change[0]).copy()
745
845
746 for fn in mf2:
846 for fn in mf2:
747 if mf1.has_key(fn):
847 if mf1.has_key(fn):
748 if mf1[fn] != mf2[fn]:
848 if mf1[fn] != mf2[fn]:
749 if mf2[fn] != "" or fcmp(fn, mf1):
849 if mf2[fn] != "" or fcmp(fn, mf1):
750 c.append(fn)
850 c.append(fn)
751 del mf1[fn]
851 del mf1[fn]
752 else:
852 else:
753 a.append(fn)
853 a.append(fn)
754
854
755 d = mf1.keys()
855 d = mf1.keys()
756
856
757 for l in c, a, d, u:
857 for l in c, a, d, u:
758 l.sort()
858 l.sort()
759
859
760 return (c, a, d, u)
860 return (c, a, d, u)
761
861
762 def add(self, list):
862 def add(self, list):
763 for f in list:
863 for f in list:
764 p = self.wjoin(f)
864 p = self.wjoin(f)
765 if not os.path.exists(p):
865 if not os.path.exists(p):
766 self.ui.warn("%s does not exist!\n" % f)
866 self.ui.warn("%s does not exist!\n" % f)
767 elif not os.path.isfile(p):
867 elif not os.path.isfile(p):
768 self.ui.warn("%s not added: mercurial only supports files currently\n" % f)
868 self.ui.warn("%s not added: mercurial only supports files currently\n" % f)
769 elif self.dirstate.state(f) == 'n':
869 elif self.dirstate.state(f) == 'n':
770 self.ui.warn("%s already tracked!\n" % f)
870 self.ui.warn("%s already tracked!\n" % f)
771 else:
871 else:
772 self.dirstate.update([f], "a")
872 self.dirstate.update([f], "a")
773
873
774 def forget(self, list):
874 def forget(self, list):
775 for f in list:
875 for f in list:
776 if self.dirstate.state(f) not in 'ai':
876 if self.dirstate.state(f) not in 'ai':
777 self.ui.warn("%s not added!\n" % f)
877 self.ui.warn("%s not added!\n" % f)
778 else:
878 else:
779 self.dirstate.forget([f])
879 self.dirstate.forget([f])
780
880
781 def remove(self, list):
881 def remove(self, list):
782 for f in list:
882 for f in list:
783 p = self.wjoin(f)
883 p = self.wjoin(f)
784 if os.path.exists(p):
884 if os.path.exists(p):
785 self.ui.warn("%s still exists!\n" % f)
885 self.ui.warn("%s still exists!\n" % f)
786 elif self.dirstate.state(f) == 'a':
886 elif self.dirstate.state(f) == 'a':
787 self.ui.warn("%s never committed!\n" % f)
887 self.ui.warn("%s never committed!\n" % f)
788 self.dirstate.forget(f)
888 self.dirstate.forget(f)
789 elif f not in self.dirstate:
889 elif f not in self.dirstate:
790 self.ui.warn("%s not tracked!\n" % f)
890 self.ui.warn("%s not tracked!\n" % f)
791 else:
891 else:
792 self.dirstate.update([f], "r")
892 self.dirstate.update([f], "r")
793
893
794 def copy(self, source, dest):
894 def copy(self, source, dest):
795 p = self.wjoin(dest)
895 p = self.wjoin(dest)
796 if not os.path.exists(dest):
896 if not os.path.exists(dest):
797 self.ui.warn("%s does not exist!\n" % dest)
897 self.ui.warn("%s does not exist!\n" % dest)
798 elif not os.path.isfile(dest):
898 elif not os.path.isfile(dest):
799 self.ui.warn("copy failed: %s is not a file\n" % dest)
899 self.ui.warn("copy failed: %s is not a file\n" % dest)
800 else:
900 else:
801 if self.dirstate.state(dest) == '?':
901 if self.dirstate.state(dest) == '?':
802 self.dirstate.update([dest], "a")
902 self.dirstate.update([dest], "a")
803 self.dirstate.copy(source, dest)
903 self.dirstate.copy(source, dest)
804
904
805 def heads(self):
905 def heads(self):
806 return self.changelog.heads()
906 return self.changelog.heads()
807
907
808 def branches(self, nodes):
908 def branches(self, nodes):
809 if not nodes: nodes = [self.changelog.tip()]
909 if not nodes: nodes = [self.changelog.tip()]
810 b = []
910 b = []
811 for n in nodes:
911 for n in nodes:
812 t = n
912 t = n
813 while n:
913 while n:
814 p = self.changelog.parents(n)
914 p = self.changelog.parents(n)
815 if p[1] != nullid or p[0] == nullid:
915 if p[1] != nullid or p[0] == nullid:
816 b.append((t, n, p[0], p[1]))
916 b.append((t, n, p[0], p[1]))
817 break
917 break
818 n = p[0]
918 n = p[0]
819 return b
919 return b
820
920
821 def between(self, pairs):
921 def between(self, pairs):
822 r = []
922 r = []
823
923
824 for top, bottom in pairs:
924 for top, bottom in pairs:
825 n, l, i = top, [], 0
925 n, l, i = top, [], 0
826 f = 1
926 f = 1
827
927
828 while n != bottom:
928 while n != bottom:
829 p = self.changelog.parents(n)[0]
929 p = self.changelog.parents(n)[0]
830 if i == f:
930 if i == f:
831 l.append(n)
931 l.append(n)
832 f = f * 2
932 f = f * 2
833 n = p
933 n = p
834 i += 1
934 i += 1
835
935
836 r.append(l)
936 r.append(l)
837
937
838 return r
938 return r
839
939
840 def newer(self, nodes):
940 def newer(self, nodes):
841 m = {}
941 m = {}
842 nl = []
942 nl = []
843 pm = {}
943 pm = {}
844 cl = self.changelog
944 cl = self.changelog
845 t = l = cl.count()
945 t = l = cl.count()
846
946
847 # find the lowest numbered node
947 # find the lowest numbered node
848 for n in nodes:
948 for n in nodes:
849 l = min(l, cl.rev(n))
949 l = min(l, cl.rev(n))
850 m[n] = 1
950 m[n] = 1
851
951
852 for i in xrange(l, t):
952 for i in xrange(l, t):
853 n = cl.node(i)
953 n = cl.node(i)
854 if n in m: # explicitly listed
954 if n in m: # explicitly listed
855 pm[n] = 1
955 pm[n] = 1
856 nl.append(n)
956 nl.append(n)
857 continue
957 continue
858 for p in cl.parents(n):
958 for p in cl.parents(n):
859 if p in pm: # parent listed
959 if p in pm: # parent listed
860 pm[n] = 1
960 pm[n] = 1
861 nl.append(n)
961 nl.append(n)
862 break
962 break
863
963
864 return nl
964 return nl
865
965
866 def findincoming(self, remote, base={}):
966 def findincoming(self, remote, base={}):
867 m = self.changelog.nodemap
967 m = self.changelog.nodemap
868 search = []
968 search = []
869 fetch = []
969 fetch = []
870 seen = {}
970 seen = {}
871 seenbranch = {}
971 seenbranch = {}
872
972
873 # assume we're closer to the tip than the root
973 # assume we're closer to the tip than the root
874 # and start by examining the heads
974 # and start by examining the heads
875 self.ui.status("searching for changes\n")
975 self.ui.status("searching for changes\n")
876 heads = remote.heads()
976 heads = remote.heads()
877 unknown = []
977 unknown = []
878 for h in heads:
978 for h in heads:
879 if h not in m:
979 if h not in m:
880 unknown.append(h)
980 unknown.append(h)
881 else:
981 else:
882 base[h] = 1
982 base[h] = 1
883
983
884 if not unknown:
984 if not unknown:
885 return None
985 return None
886
986
887 rep = {}
987 rep = {}
888 reqcnt = 0
988 reqcnt = 0
889
989
890 # search through remote branches
990 # search through remote branches
891 # a 'branch' here is a linear segment of history, with four parts:
991 # a 'branch' here is a linear segment of history, with four parts:
892 # head, root, first parent, second parent
992 # head, root, first parent, second parent
893 # (a branch always has two parents (or none) by definition)
993 # (a branch always has two parents (or none) by definition)
894 unknown = remote.branches(unknown)
994 unknown = remote.branches(unknown)
895 while unknown:
995 while unknown:
896 r = []
996 r = []
897 while unknown:
997 while unknown:
898 n = unknown.pop(0)
998 n = unknown.pop(0)
899 if n[0] in seen:
999 if n[0] in seen:
900 continue
1000 continue
901
1001
902 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1002 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
903 if n[0] == nullid:
1003 if n[0] == nullid:
904 break
1004 break
905 if n in seenbranch:
1005 if n in seenbranch:
906 self.ui.debug("branch already found\n")
1006 self.ui.debug("branch already found\n")
907 continue
1007 continue
908 if n[1] and n[1] in m: # do we know the base?
1008 if n[1] and n[1] in m: # do we know the base?
909 self.ui.debug("found incomplete branch %s:%s\n"
1009 self.ui.debug("found incomplete branch %s:%s\n"
910 % (short(n[0]), short(n[1])))
1010 % (short(n[0]), short(n[1])))
911 search.append(n) # schedule branch range for scanning
1011 search.append(n) # schedule branch range for scanning
912 seenbranch[n] = 1
1012 seenbranch[n] = 1
913 else:
1013 else:
914 if n[1] not in seen and n[1] not in fetch:
1014 if n[1] not in seen and n[1] not in fetch:
915 if n[2] in m and n[3] in m:
1015 if n[2] in m and n[3] in m:
916 self.ui.debug("found new changeset %s\n" %
1016 self.ui.debug("found new changeset %s\n" %
917 short(n[1]))
1017 short(n[1]))
918 fetch.append(n[1]) # earliest unknown
1018 fetch.append(n[1]) # earliest unknown
919 base[n[2]] = 1 # latest known
1019 base[n[2]] = 1 # latest known
920 continue
1020 continue
921
1021
922 for a in n[2:4]:
1022 for a in n[2:4]:
923 if a not in rep:
1023 if a not in rep:
924 r.append(a)
1024 r.append(a)
925 rep[a] = 1
1025 rep[a] = 1
926
1026
927 seen[n[0]] = 1
1027 seen[n[0]] = 1
928
1028
929 if r:
1029 if r:
930 reqcnt += 1
1030 reqcnt += 1
931 self.ui.debug("request %d: %s\n" %
1031 self.ui.debug("request %d: %s\n" %
932 (reqcnt, " ".join(map(short, r))))
1032 (reqcnt, " ".join(map(short, r))))
933 for p in range(0, len(r), 10):
1033 for p in range(0, len(r), 10):
934 for b in remote.branches(r[p:p+10]):
1034 for b in remote.branches(r[p:p+10]):
935 self.ui.debug("received %s:%s\n" %
1035 self.ui.debug("received %s:%s\n" %
936 (short(b[0]), short(b[1])))
1036 (short(b[0]), short(b[1])))
937 if b[0] not in m and b[0] not in seen:
1037 if b[0] not in m and b[0] not in seen:
938 unknown.append(b)
1038 unknown.append(b)
939
1039
940 # do binary search on the branches we found
1040 # do binary search on the branches we found
941 while search:
1041 while search:
942 n = search.pop(0)
1042 n = search.pop(0)
943 reqcnt += 1
1043 reqcnt += 1
944 l = remote.between([(n[0], n[1])])[0]
1044 l = remote.between([(n[0], n[1])])[0]
945 l.append(n[1])
1045 l.append(n[1])
946 p = n[0]
1046 p = n[0]
947 f = 1
1047 f = 1
948 for i in l:
1048 for i in l:
949 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1049 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
950 if i in m:
1050 if i in m:
951 if f <= 2:
1051 if f <= 2:
952 self.ui.debug("found new branch changeset %s\n" %
1052 self.ui.debug("found new branch changeset %s\n" %
953 short(p))
1053 short(p))
954 fetch.append(p)
1054 fetch.append(p)
955 base[i] = 1
1055 base[i] = 1
956 else:
1056 else:
957 self.ui.debug("narrowed branch search to %s:%s\n"
1057 self.ui.debug("narrowed branch search to %s:%s\n"
958 % (short(p), short(i)))
1058 % (short(p), short(i)))
959 search.append((p, i))
1059 search.append((p, i))
960 break
1060 break
961 p, f = i, f * 2
1061 p, f = i, f * 2
962
1062
963 # sanity check our fetch list
1063 # sanity check our fetch list
964 for f in fetch:
1064 for f in fetch:
965 if f in m:
1065 if f in m:
966 raise RepoError("already have changeset " + short(f[:4]))
1066 raise RepoError("already have changeset " + short(f[:4]))
967
1067
968 if base.keys() == [nullid]:
1068 if base.keys() == [nullid]:
969 self.ui.warn("warning: pulling from an unrelated repository!\n")
1069 self.ui.warn("warning: pulling from an unrelated repository!\n")
970
1070
971 self.ui.note("adding new changesets starting at " +
1071 self.ui.note("adding new changesets starting at " +
972 " ".join([short(f) for f in fetch]) + "\n")
1072 " ".join([short(f) for f in fetch]) + "\n")
973
1073
974 self.ui.debug("%d total queries\n" % reqcnt)
1074 self.ui.debug("%d total queries\n" % reqcnt)
975
1075
976 return fetch
1076 return fetch
977
1077
978 def findoutgoing(self, remote):
1078 def findoutgoing(self, remote):
979 base = {}
1079 base = {}
980 self.findincoming(remote, base)
1080 self.findincoming(remote, base)
981 remain = dict.fromkeys(self.changelog.nodemap)
1081 remain = dict.fromkeys(self.changelog.nodemap)
982
1082
983 # prune everything remote has from the tree
1083 # prune everything remote has from the tree
984 del remain[nullid]
1084 del remain[nullid]
985 remove = base.keys()
1085 remove = base.keys()
986 while remove:
1086 while remove:
987 n = remove.pop(0)
1087 n = remove.pop(0)
988 if n in remain:
1088 if n in remain:
989 del remain[n]
1089 del remain[n]
990 for p in self.changelog.parents(n):
1090 for p in self.changelog.parents(n):
991 remove.append(p)
1091 remove.append(p)
992
1092
993 # find every node whose parents have been pruned
1093 # find every node whose parents have been pruned
994 subset = []
1094 subset = []
995 for n in remain:
1095 for n in remain:
996 p1, p2 = self.changelog.parents(n)
1096 p1, p2 = self.changelog.parents(n)
997 if p1 not in remain and p2 not in remain:
1097 if p1 not in remain and p2 not in remain:
998 subset.append(n)
1098 subset.append(n)
999
1099
1000 # this is the set of all roots we have to push
1100 # this is the set of all roots we have to push
1001 return subset
1101 return subset
1002
1102
1003 def pull(self, remote):
1103 def pull(self, remote):
1004 lock = self.lock()
1104 lock = self.lock()
1005
1105
1006 # if we have an empty repo, fetch everything
1106 # if we have an empty repo, fetch everything
1007 if self.changelog.tip() == nullid:
1107 if self.changelog.tip() == nullid:
1008 self.ui.status("requesting all changes\n")
1108 self.ui.status("requesting all changes\n")
1009 fetch = [nullid]
1109 fetch = [nullid]
1010 else:
1110 else:
1011 fetch = self.findincoming(remote)
1111 fetch = self.findincoming(remote)
1012
1112
1013 if not fetch:
1113 if not fetch:
1014 self.ui.status("no changes found\n")
1114 self.ui.status("no changes found\n")
1015 return 1
1115 return 1
1016
1116
1017 cg = remote.changegroup(fetch)
1117 cg = remote.changegroup(fetch)
1018 return self.addchangegroup(cg)
1118 return self.addchangegroup(cg)
1019
1119
1020 def push(self, remote):
1120 def push(self, remote):
1021 lock = remote.lock()
1121 lock = remote.lock()
1022 update = self.findoutgoing(remote)
1122 update = self.findoutgoing(remote)
1023 if not update:
1123 if not update:
1024 self.ui.status("no changes found\n")
1124 self.ui.status("no changes found\n")
1025 return 1
1125 return 1
1026
1126
1027 cg = self.changegroup(update)
1127 cg = self.changegroup(update)
1028 return remote.addchangegroup(cg)
1128 return remote.addchangegroup(cg)
1029
1129
1030 def changegroup(self, basenodes):
1130 def changegroup(self, basenodes):
1031 class genread:
1131 class genread:
1032 def __init__(self, generator):
1132 def __init__(self, generator):
1033 self.g = generator
1133 self.g = generator
1034 self.buf = ""
1134 self.buf = ""
1035 def read(self, l):
1135 def read(self, l):
1036 while l > len(self.buf):
1136 while l > len(self.buf):
1037 try:
1137 try:
1038 self.buf += self.g.next()
1138 self.buf += self.g.next()
1039 except StopIteration:
1139 except StopIteration:
1040 break
1140 break
1041 d, self.buf = self.buf[:l], self.buf[l:]
1141 d, self.buf = self.buf[:l], self.buf[l:]
1042 return d
1142 return d
1043
1143
1044 def gengroup():
1144 def gengroup():
1045 nodes = self.newer(basenodes)
1145 nodes = self.newer(basenodes)
1046
1146
1047 # construct the link map
1147 # construct the link map
1048 linkmap = {}
1148 linkmap = {}
1049 for n in nodes:
1149 for n in nodes:
1050 linkmap[self.changelog.rev(n)] = n
1150 linkmap[self.changelog.rev(n)] = n
1051
1151
1052 # construct a list of all changed files
1152 # construct a list of all changed files
1053 changed = {}
1153 changed = {}
1054 for n in nodes:
1154 for n in nodes:
1055 c = self.changelog.read(n)
1155 c = self.changelog.read(n)
1056 for f in c[3]:
1156 for f in c[3]:
1057 changed[f] = 1
1157 changed[f] = 1
1058 changed = changed.keys()
1158 changed = changed.keys()
1059 changed.sort()
1159 changed.sort()
1060
1160
1061 # the changegroup is changesets + manifests + all file revs
1161 # the changegroup is changesets + manifests + all file revs
1062 revs = [ self.changelog.rev(n) for n in nodes ]
1162 revs = [ self.changelog.rev(n) for n in nodes ]
1063
1163
1064 for y in self.changelog.group(linkmap): yield y
1164 for y in self.changelog.group(linkmap): yield y
1065 for y in self.manifest.group(linkmap): yield y
1165 for y in self.manifest.group(linkmap): yield y
1066 for f in changed:
1166 for f in changed:
1067 yield struct.pack(">l", len(f) + 4) + f
1167 yield struct.pack(">l", len(f) + 4) + f
1068 g = self.file(f).group(linkmap)
1168 g = self.file(f).group(linkmap)
1069 for y in g:
1169 for y in g:
1070 yield y
1170 yield y
1071
1171
1072 yield struct.pack(">l", 0)
1172 yield struct.pack(">l", 0)
1073
1173
1074 return genread(gengroup())
1174 return genread(gengroup())
1075
1175
1076 def addchangegroup(self, source):
1176 def addchangegroup(self, source):
1077
1177
1078 def getchunk():
1178 def getchunk():
1079 d = source.read(4)
1179 d = source.read(4)
1080 if not d: return ""
1180 if not d: return ""
1081 l = struct.unpack(">l", d)[0]
1181 l = struct.unpack(">l", d)[0]
1082 if l <= 4: return ""
1182 if l <= 4: return ""
1083 return source.read(l - 4)
1183 return source.read(l - 4)
1084
1184
1085 def getgroup():
1185 def getgroup():
1086 while 1:
1186 while 1:
1087 c = getchunk()
1187 c = getchunk()
1088 if not c: break
1188 if not c: break
1089 yield c
1189 yield c
1090
1190
1091 def csmap(x):
1191 def csmap(x):
1092 self.ui.debug("add changeset %s\n" % short(x))
1192 self.ui.debug("add changeset %s\n" % short(x))
1093 return self.changelog.count()
1193 return self.changelog.count()
1094
1194
1095 def revmap(x):
1195 def revmap(x):
1096 return self.changelog.rev(x)
1196 return self.changelog.rev(x)
1097
1197
1098 if not source: return
1198 if not source: return
1099 changesets = files = revisions = 0
1199 changesets = files = revisions = 0
1100
1200
1101 tr = self.transaction()
1201 tr = self.transaction()
1102
1202
1103 # pull off the changeset group
1203 # pull off the changeset group
1104 self.ui.status("adding changesets\n")
1204 self.ui.status("adding changesets\n")
1105 co = self.changelog.tip()
1205 co = self.changelog.tip()
1106 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1206 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1107 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1207 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1108
1208
1109 # pull off the manifest group
1209 # pull off the manifest group
1110 self.ui.status("adding manifests\n")
1210 self.ui.status("adding manifests\n")
1111 mm = self.manifest.tip()
1211 mm = self.manifest.tip()
1112 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1212 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1113
1213
1114 # process the files
1214 # process the files
1115 self.ui.status("adding file revisions\n")
1215 self.ui.status("adding file revisions\n")
1116 while 1:
1216 while 1:
1117 f = getchunk()
1217 f = getchunk()
1118 if not f: break
1218 if not f: break
1119 self.ui.debug("adding %s revisions\n" % f)
1219 self.ui.debug("adding %s revisions\n" % f)
1120 fl = self.file(f)
1220 fl = self.file(f)
1121 o = fl.count()
1221 o = fl.count()
1122 n = fl.addgroup(getgroup(), revmap, tr)
1222 n = fl.addgroup(getgroup(), revmap, tr)
1123 revisions += fl.count() - o
1223 revisions += fl.count() - o
1124 files += 1
1224 files += 1
1125
1225
1126 self.ui.status(("modified %d files, added %d changesets" +
1226 self.ui.status(("modified %d files, added %d changesets" +
1127 " and %d new revisions\n")
1227 " and %d new revisions\n")
1128 % (files, changesets, revisions))
1228 % (files, changesets, revisions))
1129
1229
1130 tr.close()
1230 tr.close()
1131 return
1231 return
1132
1232
1133 def update(self, node, allow=False, force=False, choose=None,
1233 def update(self, node, allow=False, force=False, choose=None,
1134 moddirstate=True):
1234 moddirstate=True):
1135 pl = self.dirstate.parents()
1235 pl = self.dirstate.parents()
1136 if not force and pl[1] != nullid:
1236 if not force and pl[1] != nullid:
1137 self.ui.warn("aborting: outstanding uncommitted merges\n")
1237 self.ui.warn("aborting: outstanding uncommitted merges\n")
1138 return
1238 return
1139
1239
1140 p1, p2 = pl[0], node
1240 p1, p2 = pl[0], node
1141 pa = self.changelog.ancestor(p1, p2)
1241 pa = self.changelog.ancestor(p1, p2)
1142 m1n = self.changelog.read(p1)[0]
1242 m1n = self.changelog.read(p1)[0]
1143 m2n = self.changelog.read(p2)[0]
1243 m2n = self.changelog.read(p2)[0]
1144 man = self.manifest.ancestor(m1n, m2n)
1244 man = self.manifest.ancestor(m1n, m2n)
1145 m1 = self.manifest.read(m1n)
1245 m1 = self.manifest.read(m1n)
1146 mf1 = self.manifest.readflags(m1n)
1246 mf1 = self.manifest.readflags(m1n)
1147 m2 = self.manifest.read(m2n)
1247 m2 = self.manifest.read(m2n)
1148 mf2 = self.manifest.readflags(m2n)
1248 mf2 = self.manifest.readflags(m2n)
1149 ma = self.manifest.read(man)
1249 ma = self.manifest.read(man)
1150 mfa = self.manifest.readflags(man)
1250 mfa = self.manifest.readflags(man)
1151
1251
1152 (c, a, d, u) = self.changes(None, None)
1252 (c, a, d, u) = self.changes(None, None)
1153
1253
1154 # is this a jump, or a merge? i.e. is there a linear path
1254 # is this a jump, or a merge? i.e. is there a linear path
1155 # from p1 to p2?
1255 # from p1 to p2?
1156 linear_path = (pa == p1 or pa == p2)
1256 linear_path = (pa == p1 or pa == p2)
1157
1257
1158 # resolve the manifest to determine which files
1258 # resolve the manifest to determine which files
1159 # we care about merging
1259 # we care about merging
1160 self.ui.note("resolving manifests\n")
1260 self.ui.note("resolving manifests\n")
1161 self.ui.debug(" ancestor %s local %s remote %s\n" %
1261 self.ui.debug(" ancestor %s local %s remote %s\n" %
1162 (short(man), short(m1n), short(m2n)))
1262 (short(man), short(m1n), short(m2n)))
1163
1263
1164 merge = {}
1264 merge = {}
1165 get = {}
1265 get = {}
1166 remove = []
1266 remove = []
1167 mark = {}
1267 mark = {}
1168
1268
1169 # construct a working dir manifest
1269 # construct a working dir manifest
1170 mw = m1.copy()
1270 mw = m1.copy()
1171 mfw = mf1.copy()
1271 mfw = mf1.copy()
1172 umap = dict.fromkeys(u)
1272 umap = dict.fromkeys(u)
1173
1273
1174 for f in a + c + u:
1274 for f in a + c + u:
1175 mw[f] = ""
1275 mw[f] = ""
1176 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1276 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1177
1277
1178 for f in d:
1278 for f in d:
1179 if f in mw: del mw[f]
1279 if f in mw: del mw[f]
1180
1280
1181 # If we're jumping between revisions (as opposed to merging),
1281 # If we're jumping between revisions (as opposed to merging),
1182 # and if neither the working directory nor the target rev has
1282 # and if neither the working directory nor the target rev has
1183 # the file, then we need to remove it from the dirstate, to
1283 # the file, then we need to remove it from the dirstate, to
1184 # prevent the dirstate from listing the file when it is no
1284 # prevent the dirstate from listing the file when it is no
1185 # longer in the manifest.
1285 # longer in the manifest.
1186 if moddirstate and linear_path and f not in m2:
1286 if moddirstate and linear_path and f not in m2:
1187 self.dirstate.forget((f,))
1287 self.dirstate.forget((f,))
1188
1288
1189 # Compare manifests
1289 # Compare manifests
1190 for f, n in mw.iteritems():
1290 for f, n in mw.iteritems():
1191 if choose and not choose(f): continue
1291 if choose and not choose(f): continue
1192 if f in m2:
1292 if f in m2:
1193 s = 0
1293 s = 0
1194
1294
1195 # is the wfile new since m1, and match m2?
1295 # is the wfile new since m1, and match m2?
1196 if f not in m1:
1296 if f not in m1:
1197 t1 = self.wfile(f).read()
1297 t1 = self.wfile(f).read()
1198 t2 = self.file(f).revision(m2[f])
1298 t2 = self.file(f).revision(m2[f])
1199 if cmp(t1, t2) == 0:
1299 if cmp(t1, t2) == 0:
1200 mark[f] = 1
1300 mark[f] = 1
1201 n = m2[f]
1301 n = m2[f]
1202 del t1, t2
1302 del t1, t2
1203
1303
1204 # are files different?
1304 # are files different?
1205 if n != m2[f]:
1305 if n != m2[f]:
1206 a = ma.get(f, nullid)
1306 a = ma.get(f, nullid)
1207 # are both different from the ancestor?
1307 # are both different from the ancestor?
1208 if n != a and m2[f] != a:
1308 if n != a and m2[f] != a:
1209 self.ui.debug(" %s versions differ, resolve\n" % f)
1309 self.ui.debug(" %s versions differ, resolve\n" % f)
1210 # merge executable bits
1310 # merge executable bits
1211 # "if we changed or they changed, change in merge"
1311 # "if we changed or they changed, change in merge"
1212 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1312 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1213 mode = ((a^b) | (a^c)) ^ a
1313 mode = ((a^b) | (a^c)) ^ a
1214 merge[f] = (m1.get(f, nullid), m2[f], mode)
1314 merge[f] = (m1.get(f, nullid), m2[f], mode)
1215 s = 1
1315 s = 1
1216 # are we clobbering?
1316 # are we clobbering?
1217 # is remote's version newer?
1317 # is remote's version newer?
1218 # or are we going back in time?
1318 # or are we going back in time?
1219 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1319 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1220 self.ui.debug(" remote %s is newer, get\n" % f)
1320 self.ui.debug(" remote %s is newer, get\n" % f)
1221 get[f] = m2[f]
1321 get[f] = m2[f]
1222 s = 1
1322 s = 1
1223 else:
1323 else:
1224 mark[f] = 1
1324 mark[f] = 1
1225 elif f in umap:
1325 elif f in umap:
1226 # this unknown file is the same as the checkout
1326 # this unknown file is the same as the checkout
1227 get[f] = m2[f]
1327 get[f] = m2[f]
1228
1328
1229 if not s and mfw[f] != mf2[f]:
1329 if not s and mfw[f] != mf2[f]:
1230 if force:
1330 if force:
1231 self.ui.debug(" updating permissions for %s\n" % f)
1331 self.ui.debug(" updating permissions for %s\n" % f)
1232 util.set_exec(self.wjoin(f), mf2[f])
1332 util.set_exec(self.wjoin(f), mf2[f])
1233 else:
1333 else:
1234 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1334 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1235 mode = ((a^b) | (a^c)) ^ a
1335 mode = ((a^b) | (a^c)) ^ a
1236 if mode != b:
1336 if mode != b:
1237 self.ui.debug(" updating permissions for %s\n" % f)
1337 self.ui.debug(" updating permissions for %s\n" % f)
1238 util.set_exec(self.wjoin(f), mode)
1338 util.set_exec(self.wjoin(f), mode)
1239 mark[f] = 1
1339 mark[f] = 1
1240 del m2[f]
1340 del m2[f]
1241 elif f in ma:
1341 elif f in ma:
1242 if n != ma[f]:
1342 if n != ma[f]:
1243 r = "d"
1343 r = "d"
1244 if not force and (linear_path or allow):
1344 if not force and (linear_path or allow):
1245 r = self.ui.prompt(
1345 r = self.ui.prompt(
1246 (" local changed %s which remote deleted\n" % f) +
1346 (" local changed %s which remote deleted\n" % f) +
1247 "(k)eep or (d)elete?", "[kd]", "k")
1347 "(k)eep or (d)elete?", "[kd]", "k")
1248 if r == "d":
1348 if r == "d":
1249 remove.append(f)
1349 remove.append(f)
1250 else:
1350 else:
1251 self.ui.debug("other deleted %s\n" % f)
1351 self.ui.debug("other deleted %s\n" % f)
1252 remove.append(f) # other deleted it
1352 remove.append(f) # other deleted it
1253 else:
1353 else:
1254 if n == m1.get(f, nullid): # same as parent
1354 if n == m1.get(f, nullid): # same as parent
1255 if p2 == pa: # going backwards?
1355 if p2 == pa: # going backwards?
1256 self.ui.debug("remote deleted %s\n" % f)
1356 self.ui.debug("remote deleted %s\n" % f)
1257 remove.append(f)
1357 remove.append(f)
1258 else:
1358 else:
1259 self.ui.debug("local created %s, keeping\n" % f)
1359 self.ui.debug("local created %s, keeping\n" % f)
1260 else:
1360 else:
1261 self.ui.debug("working dir created %s, keeping\n" % f)
1361 self.ui.debug("working dir created %s, keeping\n" % f)
1262
1362
1263 for f, n in m2.iteritems():
1363 for f, n in m2.iteritems():
1264 if choose and not choose(f): continue
1364 if choose and not choose(f): continue
1265 if f[0] == "/": continue
1365 if f[0] == "/": continue
1266 if f in ma and n != ma[f]:
1366 if f in ma and n != ma[f]:
1267 r = "k"
1367 r = "k"
1268 if not force and (linear_path or allow):
1368 if not force and (linear_path or allow):
1269 r = self.ui.prompt(
1369 r = self.ui.prompt(
1270 ("remote changed %s which local deleted\n" % f) +
1370 ("remote changed %s which local deleted\n" % f) +
1271 "(k)eep or (d)elete?", "[kd]", "k")
1371 "(k)eep or (d)elete?", "[kd]", "k")
1272 if r == "k": get[f] = n
1372 if r == "k": get[f] = n
1273 elif f not in ma:
1373 elif f not in ma:
1274 self.ui.debug("remote created %s\n" % f)
1374 self.ui.debug("remote created %s\n" % f)
1275 get[f] = n
1375 get[f] = n
1276 else:
1376 else:
1277 self.ui.debug("local deleted %s\n" % f)
1377 self.ui.debug("local deleted %s\n" % f)
1278
1378
1279 del mw, m1, m2, ma
1379 del mw, m1, m2, ma
1280
1380
1281 if force:
1381 if force:
1282 for f in merge:
1382 for f in merge:
1283 get[f] = merge[f][1]
1383 get[f] = merge[f][1]
1284 merge = {}
1384 merge = {}
1285
1385
1286 if linear_path:
1386 if linear_path:
1287 # we don't need to do any magic, just jump to the new rev
1387 # we don't need to do any magic, just jump to the new rev
1288 mode = 'n'
1388 mode = 'n'
1289 p1, p2 = p2, nullid
1389 p1, p2 = p2, nullid
1290 else:
1390 else:
1291 if not allow:
1391 if not allow:
1292 self.ui.status("this update spans a branch" +
1392 self.ui.status("this update spans a branch" +
1293 " affecting the following files:\n")
1393 " affecting the following files:\n")
1294 fl = merge.keys() + get.keys()
1394 fl = merge.keys() + get.keys()
1295 fl.sort()
1395 fl.sort()
1296 for f in fl:
1396 for f in fl:
1297 cf = ""
1397 cf = ""
1298 if f in merge: cf = " (resolve)"
1398 if f in merge: cf = " (resolve)"
1299 self.ui.status(" %s%s\n" % (f, cf))
1399 self.ui.status(" %s%s\n" % (f, cf))
1300 self.ui.warn("aborting update spanning branches!\n")
1400 self.ui.warn("aborting update spanning branches!\n")
1301 self.ui.status("(use update -m to perform a branch merge)\n")
1401 self.ui.status("(use update -m to perform a branch merge)\n")
1302 return 1
1402 return 1
1303 # we have to remember what files we needed to get/change
1403 # we have to remember what files we needed to get/change
1304 # because any file that's different from either one of its
1404 # because any file that's different from either one of its
1305 # parents must be in the changeset
1405 # parents must be in the changeset
1306 mode = 'm'
1406 mode = 'm'
1307 if moddirstate:
1407 if moddirstate:
1308 self.dirstate.update(mark.keys(), "m")
1408 self.dirstate.update(mark.keys(), "m")
1309
1409
1310 if moddirstate:
1410 if moddirstate:
1311 self.dirstate.setparents(p1, p2)
1411 self.dirstate.setparents(p1, p2)
1312
1412
1313 # get the files we don't need to change
1413 # get the files we don't need to change
1314 files = get.keys()
1414 files = get.keys()
1315 files.sort()
1415 files.sort()
1316 for f in files:
1416 for f in files:
1317 if f[0] == "/": continue
1417 if f[0] == "/": continue
1318 self.ui.note("getting %s\n" % f)
1418 self.ui.note("getting %s\n" % f)
1319 t = self.file(f).read(get[f])
1419 t = self.file(f).read(get[f])
1320 try:
1420 try:
1321 self.wfile(f, "w").write(t)
1421 self.wfile(f, "w").write(t)
1322 except IOError:
1422 except IOError:
1323 os.makedirs(os.path.dirname(self.wjoin(f)))
1423 os.makedirs(os.path.dirname(self.wjoin(f)))
1324 self.wfile(f, "w").write(t)
1424 self.wfile(f, "w").write(t)
1325 util.set_exec(self.wjoin(f), mf2[f])
1425 util.set_exec(self.wjoin(f), mf2[f])
1326 if moddirstate:
1426 if moddirstate:
1327 self.dirstate.update([f], mode)
1427 self.dirstate.update([f], mode)
1328
1428
1329 # merge the tricky bits
1429 # merge the tricky bits
1330 files = merge.keys()
1430 files = merge.keys()
1331 files.sort()
1431 files.sort()
1332 for f in files:
1432 for f in files:
1333 self.ui.status("merging %s\n" % f)
1433 self.ui.status("merging %s\n" % f)
1334 m, o, flag = merge[f]
1434 m, o, flag = merge[f]
1335 self.merge3(f, m, o)
1435 self.merge3(f, m, o)
1336 util.set_exec(self.wjoin(f), flag)
1436 util.set_exec(self.wjoin(f), flag)
1337 if moddirstate:
1437 if moddirstate:
1338 self.dirstate.update([f], 'm')
1438 self.dirstate.update([f], 'm')
1339
1439
1340 for f in remove:
1440 for f in remove:
1341 self.ui.note("removing %s\n" % f)
1441 self.ui.note("removing %s\n" % f)
1342 os.unlink(f)
1442 os.unlink(f)
1343 # try removing directories that might now be empty
1443 # try removing directories that might now be empty
1344 try: os.removedirs(os.path.dirname(f))
1444 try: os.removedirs(os.path.dirname(f))
1345 except: pass
1445 except: pass
1346 if moddirstate:
1446 if moddirstate:
1347 if mode == 'n':
1447 if mode == 'n':
1348 self.dirstate.forget(remove)
1448 self.dirstate.forget(remove)
1349 else:
1449 else:
1350 self.dirstate.update(remove, 'r')
1450 self.dirstate.update(remove, 'r')
1351
1451
1352 def merge3(self, fn, my, other):
1452 def merge3(self, fn, my, other):
1353 """perform a 3-way merge in the working directory"""
1453 """perform a 3-way merge in the working directory"""
1354
1454
1355 def temp(prefix, node):
1455 def temp(prefix, node):
1356 pre = "%s~%s." % (os.path.basename(fn), prefix)
1456 pre = "%s~%s." % (os.path.basename(fn), prefix)
1357 (fd, name) = tempfile.mkstemp("", pre)
1457 (fd, name) = tempfile.mkstemp("", pre)
1358 f = os.fdopen(fd, "wb")
1458 f = os.fdopen(fd, "wb")
1359 f.write(fl.revision(node))
1459 f.write(fl.revision(node))
1360 f.close()
1460 f.close()
1361 return name
1461 return name
1362
1462
1363 fl = self.file(fn)
1463 fl = self.file(fn)
1364 base = fl.ancestor(my, other)
1464 base = fl.ancestor(my, other)
1365 a = self.wjoin(fn)
1465 a = self.wjoin(fn)
1366 b = temp("base", base)
1466 b = temp("base", base)
1367 c = temp("other", other)
1467 c = temp("other", other)
1368
1468
1369 self.ui.note("resolving %s\n" % fn)
1469 self.ui.note("resolving %s\n" % fn)
1370 self.ui.debug("file %s: other %s ancestor %s\n" %
1470 self.ui.debug("file %s: other %s ancestor %s\n" %
1371 (fn, short(other), short(base)))
1471 (fn, short(other), short(base)))
1372
1472
1373 cmd = self.ui.config("ui", "merge") or \
1473 cmd = self.ui.config("ui", "merge") or \
1374 os.environ.get("HGMERGE", "hgmerge")
1474 os.environ.get("HGMERGE", "hgmerge")
1375 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1475 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1376 if r:
1476 if r:
1377 self.ui.warn("merging %s failed!\n" % fn)
1477 self.ui.warn("merging %s failed!\n" % fn)
1378
1478
1379 os.unlink(b)
1479 os.unlink(b)
1380 os.unlink(c)
1480 os.unlink(c)
1381
1481
1382 def verify(self):
1482 def verify(self):
1383 filelinkrevs = {}
1483 filelinkrevs = {}
1384 filenodes = {}
1484 filenodes = {}
1385 changesets = revisions = files = 0
1485 changesets = revisions = files = 0
1386 errors = 0
1486 errors = 0
1387
1487
1388 seen = {}
1488 seen = {}
1389 self.ui.status("checking changesets\n")
1489 self.ui.status("checking changesets\n")
1390 for i in range(self.changelog.count()):
1490 for i in range(self.changelog.count()):
1391 changesets += 1
1491 changesets += 1
1392 n = self.changelog.node(i)
1492 n = self.changelog.node(i)
1393 if n in seen:
1493 if n in seen:
1394 self.ui.warn("duplicate changeset at revision %d\n" % i)
1494 self.ui.warn("duplicate changeset at revision %d\n" % i)
1395 errors += 1
1495 errors += 1
1396 seen[n] = 1
1496 seen[n] = 1
1397
1497
1398 for p in self.changelog.parents(n):
1498 for p in self.changelog.parents(n):
1399 if p not in self.changelog.nodemap:
1499 if p not in self.changelog.nodemap:
1400 self.ui.warn("changeset %s has unknown parent %s\n" %
1500 self.ui.warn("changeset %s has unknown parent %s\n" %
1401 (short(n), short(p)))
1501 (short(n), short(p)))
1402 errors += 1
1502 errors += 1
1403 try:
1503 try:
1404 changes = self.changelog.read(n)
1504 changes = self.changelog.read(n)
1405 except Exception, inst:
1505 except Exception, inst:
1406 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1506 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1407 errors += 1
1507 errors += 1
1408
1508
1409 for f in changes[3]:
1509 for f in changes[3]:
1410 filelinkrevs.setdefault(f, []).append(i)
1510 filelinkrevs.setdefault(f, []).append(i)
1411
1511
1412 seen = {}
1512 seen = {}
1413 self.ui.status("checking manifests\n")
1513 self.ui.status("checking manifests\n")
1414 for i in range(self.manifest.count()):
1514 for i in range(self.manifest.count()):
1415 n = self.manifest.node(i)
1515 n = self.manifest.node(i)
1416 if n in seen:
1516 if n in seen:
1417 self.ui.warn("duplicate manifest at revision %d\n" % i)
1517 self.ui.warn("duplicate manifest at revision %d\n" % i)
1418 errors += 1
1518 errors += 1
1419 seen[n] = 1
1519 seen[n] = 1
1420
1520
1421 for p in self.manifest.parents(n):
1521 for p in self.manifest.parents(n):
1422 if p not in self.manifest.nodemap:
1522 if p not in self.manifest.nodemap:
1423 self.ui.warn("manifest %s has unknown parent %s\n" %
1523 self.ui.warn("manifest %s has unknown parent %s\n" %
1424 (short(n), short(p)))
1524 (short(n), short(p)))
1425 errors += 1
1525 errors += 1
1426
1526
1427 try:
1527 try:
1428 delta = mdiff.patchtext(self.manifest.delta(n))
1528 delta = mdiff.patchtext(self.manifest.delta(n))
1429 except KeyboardInterrupt:
1529 except KeyboardInterrupt:
1430 self.ui.warn("aborted")
1530 self.ui.warn("aborted")
1431 sys.exit(0)
1531 sys.exit(0)
1432 except Exception, inst:
1532 except Exception, inst:
1433 self.ui.warn("unpacking manifest %s: %s\n"
1533 self.ui.warn("unpacking manifest %s: %s\n"
1434 % (short(n), inst))
1534 % (short(n), inst))
1435 errors += 1
1535 errors += 1
1436
1536
1437 ff = [ l.split('\0') for l in delta.splitlines() ]
1537 ff = [ l.split('\0') for l in delta.splitlines() ]
1438 for f, fn in ff:
1538 for f, fn in ff:
1439 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1539 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1440
1540
1441 self.ui.status("crosschecking files in changesets and manifests\n")
1541 self.ui.status("crosschecking files in changesets and manifests\n")
1442 for f in filenodes:
1542 for f in filenodes:
1443 if f not in filelinkrevs:
1543 if f not in filelinkrevs:
1444 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1544 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1445 errors += 1
1545 errors += 1
1446
1546
1447 for f in filelinkrevs:
1547 for f in filelinkrevs:
1448 if f not in filenodes:
1548 if f not in filenodes:
1449 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1549 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1450 errors += 1
1550 errors += 1
1451
1551
1452 self.ui.status("checking files\n")
1552 self.ui.status("checking files\n")
1453 ff = filenodes.keys()
1553 ff = filenodes.keys()
1454 ff.sort()
1554 ff.sort()
1455 for f in ff:
1555 for f in ff:
1456 if f == "/dev/null": continue
1556 if f == "/dev/null": continue
1457 files += 1
1557 files += 1
1458 fl = self.file(f)
1558 fl = self.file(f)
1459 nodes = { nullid: 1 }
1559 nodes = { nullid: 1 }
1460 seen = {}
1560 seen = {}
1461 for i in range(fl.count()):
1561 for i in range(fl.count()):
1462 revisions += 1
1562 revisions += 1
1463 n = fl.node(i)
1563 n = fl.node(i)
1464
1564
1465 if n in seen:
1565 if n in seen:
1466 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1566 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1467 errors += 1
1567 errors += 1
1468
1568
1469 if n not in filenodes[f]:
1569 if n not in filenodes[f]:
1470 self.ui.warn("%s: %d:%s not in manifests\n"
1570 self.ui.warn("%s: %d:%s not in manifests\n"
1471 % (f, i, short(n)))
1571 % (f, i, short(n)))
1472 errors += 1
1572 errors += 1
1473 else:
1573 else:
1474 del filenodes[f][n]
1574 del filenodes[f][n]
1475
1575
1476 flr = fl.linkrev(n)
1576 flr = fl.linkrev(n)
1477 if flr not in filelinkrevs[f]:
1577 if flr not in filelinkrevs[f]:
1478 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1578 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1479 % (f, short(n), fl.linkrev(n)))
1579 % (f, short(n), fl.linkrev(n)))
1480 errors += 1
1580 errors += 1
1481 else:
1581 else:
1482 filelinkrevs[f].remove(flr)
1582 filelinkrevs[f].remove(flr)
1483
1583
1484 # verify contents
1584 # verify contents
1485 try:
1585 try:
1486 t = fl.read(n)
1586 t = fl.read(n)
1487 except Exception, inst:
1587 except Exception, inst:
1488 self.ui.warn("unpacking file %s %s: %s\n"
1588 self.ui.warn("unpacking file %s %s: %s\n"
1489 % (f, short(n), inst))
1589 % (f, short(n), inst))
1490 errors += 1
1590 errors += 1
1491
1591
1492 # verify parents
1592 # verify parents
1493 (p1, p2) = fl.parents(n)
1593 (p1, p2) = fl.parents(n)
1494 if p1 not in nodes:
1594 if p1 not in nodes:
1495 self.ui.warn("file %s:%s unknown parent 1 %s" %
1595 self.ui.warn("file %s:%s unknown parent 1 %s" %
1496 (f, short(n), short(p1)))
1596 (f, short(n), short(p1)))
1497 errors += 1
1597 errors += 1
1498 if p2 not in nodes:
1598 if p2 not in nodes:
1499 self.ui.warn("file %s:%s unknown parent 2 %s" %
1599 self.ui.warn("file %s:%s unknown parent 2 %s" %
1500 (f, short(n), short(p1)))
1600 (f, short(n), short(p1)))
1501 errors += 1
1601 errors += 1
1502 nodes[n] = 1
1602 nodes[n] = 1
1503
1603
1504 # cross-check
1604 # cross-check
1505 for node in filenodes[f]:
1605 for node in filenodes[f]:
1506 self.ui.warn("node %s in manifests not in %s\n"
1606 self.ui.warn("node %s in manifests not in %s\n"
1507 % (hex(n), f))
1607 % (hex(n), f))
1508 errors += 1
1608 errors += 1
1509
1609
1510 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1610 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1511 (files, changesets, revisions))
1611 (files, changesets, revisions))
1512
1612
1513 if errors:
1613 if errors:
1514 self.ui.warn("%d integrity errors encountered!\n" % errors)
1614 self.ui.warn("%d integrity errors encountered!\n" % errors)
1515 return 1
1615 return 1
1516
1616
1517 class httprepository:
1617 class httprepository:
1518 def __init__(self, ui, path):
1618 def __init__(self, ui, path):
1519 self.url = path
1619 self.url = path
1520 self.ui = ui
1620 self.ui = ui
1521 no_list = [ "localhost", "127.0.0.1" ]
1621 no_list = [ "localhost", "127.0.0.1" ]
1522 host = ui.config("http_proxy", "host")
1622 host = ui.config("http_proxy", "host")
1523 if host is None:
1623 if host is None:
1524 host = os.environ.get("http_proxy")
1624 host = os.environ.get("http_proxy")
1525 if host and host.startswith('http://'):
1625 if host and host.startswith('http://'):
1526 host = host[7:]
1626 host = host[7:]
1527 user = ui.config("http_proxy", "user")
1627 user = ui.config("http_proxy", "user")
1528 passwd = ui.config("http_proxy", "passwd")
1628 passwd = ui.config("http_proxy", "passwd")
1529 no = ui.config("http_proxy", "no")
1629 no = ui.config("http_proxy", "no")
1530 if no is None:
1630 if no is None:
1531 no = os.environ.get("no_proxy")
1631 no = os.environ.get("no_proxy")
1532 if no:
1632 if no:
1533 no_list = no_list + no.split(",")
1633 no_list = no_list + no.split(",")
1534
1634
1535 no_proxy = 0
1635 no_proxy = 0
1536 for h in no_list:
1636 for h in no_list:
1537 if (path.startswith("http://" + h + "/") or
1637 if (path.startswith("http://" + h + "/") or
1538 path.startswith("http://" + h + ":") or
1638 path.startswith("http://" + h + ":") or
1539 path == "http://" + h):
1639 path == "http://" + h):
1540 no_proxy = 1
1640 no_proxy = 1
1541
1641
1542 # Note: urllib2 takes proxy values from the environment and those will
1642 # Note: urllib2 takes proxy values from the environment and those will
1543 # take precedence
1643 # take precedence
1544 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1644 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1545 if os.environ.has_key(env):
1645 if os.environ.has_key(env):
1546 del os.environ[env]
1646 del os.environ[env]
1547
1647
1548 proxy_handler = urllib2.BaseHandler()
1648 proxy_handler = urllib2.BaseHandler()
1549 if host and not no_proxy:
1649 if host and not no_proxy:
1550 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1650 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1551
1651
1552 authinfo = None
1652 authinfo = None
1553 if user and passwd:
1653 if user and passwd:
1554 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1654 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1555 passmgr.add_password(None, host, user, passwd)
1655 passmgr.add_password(None, host, user, passwd)
1556 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1656 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1557
1657
1558 opener = urllib2.build_opener(proxy_handler, authinfo)
1658 opener = urllib2.build_opener(proxy_handler, authinfo)
1559 urllib2.install_opener(opener)
1659 urllib2.install_opener(opener)
1560
1660
1561 def dev(self):
1661 def dev(self):
1562 return -1
1662 return -1
1563
1663
1564 def do_cmd(self, cmd, **args):
1664 def do_cmd(self, cmd, **args):
1565 self.ui.debug("sending %s command\n" % cmd)
1665 self.ui.debug("sending %s command\n" % cmd)
1566 q = {"cmd": cmd}
1666 q = {"cmd": cmd}
1567 q.update(args)
1667 q.update(args)
1568 qs = urllib.urlencode(q)
1668 qs = urllib.urlencode(q)
1569 cu = "%s?%s" % (self.url, qs)
1669 cu = "%s?%s" % (self.url, qs)
1570 return urllib2.urlopen(cu)
1670 return urllib2.urlopen(cu)
1571
1671
1572 def heads(self):
1672 def heads(self):
1573 d = self.do_cmd("heads").read()
1673 d = self.do_cmd("heads").read()
1574 try:
1674 try:
1575 return map(bin, d[:-1].split(" "))
1675 return map(bin, d[:-1].split(" "))
1576 except:
1676 except:
1577 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1677 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1578 raise
1678 raise
1579
1679
1580 def branches(self, nodes):
1680 def branches(self, nodes):
1581 n = " ".join(map(hex, nodes))
1681 n = " ".join(map(hex, nodes))
1582 d = self.do_cmd("branches", nodes=n).read()
1682 d = self.do_cmd("branches", nodes=n).read()
1583 try:
1683 try:
1584 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1684 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1585 return br
1685 return br
1586 except:
1686 except:
1587 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1687 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1588 raise
1688 raise
1589
1689
1590 def between(self, pairs):
1690 def between(self, pairs):
1591 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1691 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1592 d = self.do_cmd("between", pairs=n).read()
1692 d = self.do_cmd("between", pairs=n).read()
1593 try:
1693 try:
1594 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1694 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1595 return p
1695 return p
1596 except:
1696 except:
1597 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1697 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1598 raise
1698 raise
1599
1699
1600 def changegroup(self, nodes):
1700 def changegroup(self, nodes):
1601 n = " ".join(map(hex, nodes))
1701 n = " ".join(map(hex, nodes))
1602 f = self.do_cmd("changegroup", roots=n)
1702 f = self.do_cmd("changegroup", roots=n)
1603 bytes = 0
1703 bytes = 0
1604
1704
1605 class zread:
1705 class zread:
1606 def __init__(self, f):
1706 def __init__(self, f):
1607 self.zd = zlib.decompressobj()
1707 self.zd = zlib.decompressobj()
1608 self.f = f
1708 self.f = f
1609 self.buf = ""
1709 self.buf = ""
1610 def read(self, l):
1710 def read(self, l):
1611 while l > len(self.buf):
1711 while l > len(self.buf):
1612 r = f.read(4096)
1712 r = f.read(4096)
1613 if r:
1713 if r:
1614 self.buf += self.zd.decompress(r)
1714 self.buf += self.zd.decompress(r)
1615 else:
1715 else:
1616 self.buf += self.zd.flush()
1716 self.buf += self.zd.flush()
1617 break
1717 break
1618 d, self.buf = self.buf[:l], self.buf[l:]
1718 d, self.buf = self.buf[:l], self.buf[l:]
1619 return d
1719 return d
1620
1720
1621 return zread(f)
1721 return zread(f)
1622
1722
1623 class remotelock:
1723 class remotelock:
1624 def __init__(self, repo):
1724 def __init__(self, repo):
1625 self.repo = repo
1725 self.repo = repo
1626 def release(self):
1726 def release(self):
1627 self.repo.unlock()
1727 self.repo.unlock()
1628 self.repo = None
1728 self.repo = None
1629 def __del__(self):
1729 def __del__(self):
1630 if self.repo:
1730 if self.repo:
1631 self.release()
1731 self.release()
1632
1732
1633 class sshrepository:
1733 class sshrepository:
1634 def __init__(self, ui, path):
1734 def __init__(self, ui, path):
1635 self.url = path
1735 self.url = path
1636 self.ui = ui
1736 self.ui = ui
1637
1737
1638 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
1738 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
1639 if not m:
1739 if not m:
1640 raise RepoError("couldn't parse destination %s\n" % path)
1740 raise RepoError("couldn't parse destination %s\n" % path)
1641
1741
1642 self.user = m.group(2)
1742 self.user = m.group(2)
1643 self.host = m.group(3)
1743 self.host = m.group(3)
1644 self.port = m.group(5)
1744 self.port = m.group(5)
1645 self.path = m.group(7)
1745 self.path = m.group(7)
1646
1746
1647 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
1747 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
1648 args = self.port and ("%s -p %s") % (args, self.port) or args
1748 args = self.port and ("%s -p %s") % (args, self.port) or args
1649 path = self.path or ""
1749 path = self.path or ""
1650
1750
1651 cmd = "ssh %s 'hg -R %s serve --stdio'"
1751 cmd = "ssh %s 'hg -R %s serve --stdio'"
1652 cmd = cmd % (args, path)
1752 cmd = cmd % (args, path)
1653
1753
1654 self.pipeo, self.pipei = os.popen2(cmd)
1754 self.pipeo, self.pipei = os.popen2(cmd)
1655
1755
1656 def __del__(self):
1756 def __del__(self):
1657 self.pipeo.close()
1757 self.pipeo.close()
1658 self.pipei.close()
1758 self.pipei.close()
1659
1759
1660 def dev(self):
1760 def dev(self):
1661 return -1
1761 return -1
1662
1762
1663 def do_cmd(self, cmd, **args):
1763 def do_cmd(self, cmd, **args):
1664 self.ui.debug("sending %s command\n" % cmd)
1764 self.ui.debug("sending %s command\n" % cmd)
1665 self.pipeo.write("%s\n" % cmd)
1765 self.pipeo.write("%s\n" % cmd)
1666 for k, v in args.items():
1766 for k, v in args.items():
1667 self.pipeo.write("%s %d\n" % (k, len(v)))
1767 self.pipeo.write("%s %d\n" % (k, len(v)))
1668 self.pipeo.write(v)
1768 self.pipeo.write(v)
1669 self.pipeo.flush()
1769 self.pipeo.flush()
1670
1770
1671 return self.pipei
1771 return self.pipei
1672
1772
1673 def call(self, cmd, **args):
1773 def call(self, cmd, **args):
1674 r = self.do_cmd(cmd, **args)
1774 r = self.do_cmd(cmd, **args)
1675 l = int(r.readline())
1775 l = int(r.readline())
1676 return r.read(l)
1776 return r.read(l)
1677
1777
1678 def lock(self):
1778 def lock(self):
1679 self.call("lock")
1779 self.call("lock")
1680 return remotelock(self)
1780 return remotelock(self)
1681
1781
1682 def unlock(self):
1782 def unlock(self):
1683 self.call("unlock")
1783 self.call("unlock")
1684
1784
1685 def heads(self):
1785 def heads(self):
1686 d = self.call("heads")
1786 d = self.call("heads")
1687 try:
1787 try:
1688 return map(bin, d[:-1].split(" "))
1788 return map(bin, d[:-1].split(" "))
1689 except:
1789 except:
1690 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1790 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1691 raise
1791 raise
1692
1792
1693 def branches(self, nodes):
1793 def branches(self, nodes):
1694 n = " ".join(map(hex, nodes))
1794 n = " ".join(map(hex, nodes))
1695 d = self.call("branches", nodes=n)
1795 d = self.call("branches", nodes=n)
1696 try:
1796 try:
1697 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1797 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1698 return br
1798 return br
1699 except:
1799 except:
1700 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1800 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1701 raise
1801 raise
1702
1802
1703 def between(self, pairs):
1803 def between(self, pairs):
1704 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1804 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1705 d = self.call("between", pairs=n)
1805 d = self.call("between", pairs=n)
1706 try:
1806 try:
1707 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1807 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1708 return p
1808 return p
1709 except:
1809 except:
1710 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1810 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1711 raise
1811 raise
1712
1812
1713 def changegroup(self, nodes):
1813 def changegroup(self, nodes):
1714 n = " ".join(map(hex, nodes))
1814 n = " ".join(map(hex, nodes))
1715 f = self.do_cmd("changegroup", roots=n)
1815 f = self.do_cmd("changegroup", roots=n)
1716 return self.pipei
1816 return self.pipei
1717
1817
1718 def addchangegroup(self, cg):
1818 def addchangegroup(self, cg):
1719 d = self.call("addchangegroup")
1819 d = self.call("addchangegroup")
1720 if d:
1820 if d:
1721 raise RepoError("push refused: %s", d)
1821 raise RepoError("push refused: %s", d)
1722
1822
1723 while 1:
1823 while 1:
1724 d = cg.read(4096)
1824 d = cg.read(4096)
1725 if not d: break
1825 if not d: break
1726 self.pipeo.write(d)
1826 self.pipeo.write(d)
1727
1827
1728 self.pipeo.flush()
1828 self.pipeo.flush()
1729
1829
1730 l = int(self.pipei.readline())
1830 l = int(self.pipei.readline())
1731 return self.pipei.read(l)
1831 return self.pipei.read(l)
1732
1832
1733 def repository(ui, path=None, create=0):
1833 def repository(ui, path=None, create=0):
1734 if path:
1834 if path:
1735 if path.startswith("http://"):
1835 if path.startswith("http://"):
1736 return httprepository(ui, path)
1836 return httprepository(ui, path)
1737 if path.startswith("hg://"):
1837 if path.startswith("hg://"):
1738 return httprepository(ui, path.replace("hg://", "http://"))
1838 return httprepository(ui, path.replace("hg://", "http://"))
1739 if path.startswith("old-http://"):
1839 if path.startswith("old-http://"):
1740 return localrepository(ui, path.replace("old-http://", "http://"))
1840 return localrepository(ui, path.replace("old-http://", "http://"))
1741 if path.startswith("ssh://"):
1841 if path.startswith("ssh://"):
1742 return sshrepository(ui, path)
1842 return sshrepository(ui, path)
1743
1843
1744 return localrepository(ui, path, create)
1844 return localrepository(ui, path, create)
@@ -1,541 +1,542 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # This provides efficient delta storage with O(1) retrieve and append
3 # This provides efficient delta storage with O(1) retrieve and append
4 # and O(changes) merge between branches
4 # and O(changes) merge between branches
5 #
5 #
6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
7 #
7 #
8 # This software may be used and distributed according to the terms
8 # This software may be used and distributed according to the terms
9 # of the GNU General Public License, incorporated herein by reference.
9 # of the GNU General Public License, incorporated herein by reference.
10
10
11 import zlib, struct, sha, binascii, heapq
11 import zlib, struct, sha, binascii, heapq
12 from mercurial import mdiff
12 from mercurial import mdiff
13
13
def hex(node):
    """Return the hexadecimal string form of a binary node id."""
    return binascii.hexlify(node)
def bin(node):
    """Return the binary form of a hexadecimal node id."""
    return binascii.unhexlify(node)
def short(node):
    """Return an abbreviated (first 6 bytes, 12 hex digits) node id."""
    return hex(node[:6])
17
17
def compress(text):
    """Compress revlog data with zlib.

    Empty input is returned unchanged.  Data that is too small to be
    worth compressing, or that grows under compression, is stored
    literally: passed through if it already starts with '\\0' (which
    decompress treats as "stored as-is"), otherwise tagged with a
    leading 'u'.
    """
    if not text:
        return text

    def literal(t):
        # '\0'-prefixed data needs no tag; everything else gets 'u'
        if t[0] == '\0':
            return t
        return 'u' + t

    # tiny inputs never win from zlib's overhead
    if len(text) < 44:
        return literal(text)

    packed = zlib.compress(text)
    if len(packed) > len(text):
        return literal(text)
    return packed
28
28
def decompress(bin):
    """Reverse compress(): dispatch on the first byte of the stored data."""
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        # NUL-initial data was stored as-is
        return bin
    if marker == 'x':
        # zlib streams begin with 'x'
        return zlib.decompress(bin)
    if marker == 'u':
        # 'u'-tagged data was stored uncompressed
        return bin[1:]
    raise "unknown compression type %s" % marker
36
36
def hash(text, p1, p2):
    """Return the SHA-1 node id of *text* with parents p1 and p2.

    The parents are fed in sorted order so that the hash does not
    depend on which parent is first.
    """
    first, second = min(p1, p2), max(p1, p2)
    s = sha.new(first)
    s.update(second)
    s.update(text)
    return s.digest()
44
44
# node id of the "null" (empty) revision; parent of every root revision
nullid = "\0" * 20
# on-disk index entry layout (big-endian): offset, length, base, linkrev
# as 4 longs, followed by p1, p2 and nodeid as 20-byte strings
indexformat = ">4l20s20s20s"
47
47
class lazyparser:
    """Parse a revlog index incrementally, in blocks of 1000 entries.

    Avoids unpacking a large index up front; lazyindex and lazymap call
    load() to materialize entries on demand.
    """
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data)/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def load(self, pos=None):
        """Parse the 1000-entry block containing *pos*, or everything
        when pos is None."""
        if self.all:
            return
        if pos is None:
            self.all = 1
            start, stop = 0, self.l
        else:
            start = (pos / 1000) * 1000
            stop = min(self.l, start + 1000)
        # hand the (partially filled) structures back to the revlog
        self.revlog.index = self.index
        self.revlog.nodemap = self.map

        n = start
        while n < stop:
            raw = self.data[n * self.s: (n + 1) * self.s]
            entry = struct.unpack(indexformat, raw)
            self.index[n] = entry
            self.map[entry[6]] = n
            n += 1
77
77
class lazyindex:
    """A list-like view over a lazyparser's index, loading on demand."""
    def __init__(self, parser):
        self.p = parser

    def __len__(self):
        return len(self.p.index)

    def load(self, pos):
        self.p.load(pos)
        return self.p.index[pos]

    def __getitem__(self, pos):
        # parsed entries are non-empty tuples (truthy); None means the
        # block holding this entry has not been parsed yet
        entry = self.p.index[pos]
        if entry:
            return entry
        return self.load(pos)

    def append(self, e):
        self.p.index.append(e)
90
90
class lazymap:
    """A dict-like node->rev mapping backed by a lazyparser."""
    def __init__(self, parser):
        self.p = parser

    def load(self, key):
        """Locate *key* in the raw index data and parse its block."""
        if self.p.all:
            return
        offset = self.p.data.find(key)
        if offset < 0:
            raise KeyError("node " + hex(key))
        self.p.load(offset / self.p.s)

    def __contains__(self, key):
        # membership needs the whole map, so force a full parse
        self.p.load()
        return key in self.p.map

    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                self.p.load(i)
                yield self.p.index[i][6]

    def __getitem__(self, key):
        if key not in self.p.map:
            self.load(key)
        if key not in self.p.map:
            raise KeyError("node " + hex(key))
        return self.p.map[key]

    def __setitem__(self, key, val):
        self.p.map[key] = val
122
122
class revlog:
    """Store a series of file revisions as deltas with periodic full
    snapshots, indexed by node id (see indexformat for the entry layout).
    """
    def __init__(self, opener, indexfile, datafile):
        """opener is a callable(path[, mode]) returning a file object
        for paths relative to the repository store."""
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        # one-entry cache of the last reconstructed revision:
        # (node, rev, text) — see revision()
        self.cache = None

        try:
            i = self.opener(self.indexfile).read()
        except IOError:
            # no index file yet: start with an empty revlog
            i = ""

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            # small index: unpack everything eagerly
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1
156
156
    # node of the most recently added revision
    def tip(self): return self.node(len(self.index) - 1)
    # number of revisions in this revlog
    def count(self): return len(self.index)
    # rev -> node; any negative rev maps to nullid
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    # node -> rev
    def rev(self, node): return self.nodemap[node]
    # changelog revision this node was introduced with (index field 3)
    def linkrev(self, node): return self.index[self.nodemap[node]][3]
    def parents(self, node):
        # the null revision's parents are both null
        if node == nullid: return (nullid, nullid)
        return self.index[self.nodemap[node]][4:6]

    # byte offset of rev's data in the datafile
    def start(self, rev): return self.index[rev][0]
    # stored (compressed) length of rev's data
    def length(self, rev): return self.index[rev][1]
    # byte offset just past rev's data
    def end(self, rev): return self.start(rev) + self.length(rev)
    # first revision of the delta chain that rev belongs to
    def base(self, rev): return self.index[rev][2]
170
170
171 def heads(self):
171 def heads(self):
172 p = {}
172 p = {}
173 h = []
173 h = []
174 for r in range(self.count() - 1, -1, -1):
174 for r in range(self.count() - 1, -1, -1):
175 n = self.node(r)
175 n = self.node(r)
176 if n not in p:
176 if n not in p:
177 h.append(n)
177 h.append(n)
178 for pn in self.parents(n):
178 for pn in self.parents(n):
179 p[pn] = 1
179 p[pn] = 1
180 return h
180 return h
181
181
182 def children(self, node):
182 def children(self, node):
183 c = []
183 c = []
184 p = self.rev(node)
184 p = self.rev(node)
185 for r in range(p + 1, self.count()):
185 for r in range(p + 1, self.count()):
186 n = self.node(r)
186 n = self.node(r)
187 for pn in self.parents(n):
187 for pn in self.parents(n):
188 if pn == p:
188 if pn == p:
189 c.append(p)
189 c.append(p)
190 continue
190 continue
191 elif pn == nullid:
191 elif pn == nullid:
192 continue
192 continue
193 return c
193 return c
194
194
195 def lookup(self, id):
195 def lookup(self, id):
196 try:
196 try:
197 rev = int(id)
197 rev = int(id)
198 if str(rev) != id: raise ValueError
198 if str(rev) != id: raise ValueError
199 if rev < 0: rev = self.count() + rev
199 if rev < 0: rev = self.count() + rev
200 if rev < 0 or rev >= self.count(): raise ValueError
200 if rev < 0 or rev >= self.count(): raise ValueError
201 return self.node(rev)
201 return self.node(rev)
202 except (ValueError, OverflowError):
202 except (ValueError, OverflowError):
203 c = []
203 c = []
204 for n in self.nodemap:
204 for n in self.nodemap:
205 if hex(n).startswith(id):
205 if hex(n).startswith(id):
206 c.append(n)
206 c.append(n)
207 if len(c) > 1: raise KeyError("Ambiguous identifier")
207 if len(c) > 1: raise KeyError("Ambiguous identifier")
208 if len(c) < 1: raise KeyError("No match found")
208 if len(c) < 1: raise KeyError("No match found")
209 return c[0]
209 return c[0]
210
210
211 return None
211 return None
212
212
213 def diff(self, a, b):
213 def diff(self, a, b):
214 return mdiff.textdiff(a, b)
214 return mdiff.textdiff(a, b)
215
215
216 def patches(self, t, pl):
216 def patches(self, t, pl):
217 return mdiff.patches(t, pl)
217 return mdiff.patches(t, pl)
218
218
219 def delta(self, node):
219 def delta(self, node):
220 r = self.rev(node)
220 r = self.rev(node)
221 b = self.base(r)
221 b = self.base(r)
222 if r == b:
222 if r == b:
223 return self.diff(self.revision(self.node(r - 1)),
223 return self.diff(self.revision(self.node(r - 1)),
224 self.revision(node))
224 self.revision(node))
225 else:
225 else:
226 f = self.opener(self.datafile)
226 f = self.opener(self.datafile)
227 f.seek(self.start(r))
227 f.seek(self.start(r))
228 data = f.read(self.length(r))
228 data = f.read(self.length(r))
229 return decompress(data)
229 return decompress(data)
230
230
    def revision(self, node):
        """Reconstruct and return the full text of *node*.

        Decompresses the chain's base text, applies each delta up to the
        requested revision (reusing the cached text when it lies on the
        same chain), and verifies the result against the node hash.
        Raises IOError on an integrity failure.
        """
        if node == nullid: return ""
        # cache is (node, rev, text) of the last reconstructed revision
        if self.cache and self.cache[0] == node: return self.cache[2]

        text = None
        rev = self.rev(node)
        start, length, base, link, p1, p2, node = self.index[rev]
        end = start + length
        # a delta chain's data begins at its base revision, not at rev
        if base != rev: start = self.start(base)

        # if the cached text lies on this chain between base and rev,
        # start patching from it instead of from the base text
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if not text:
            # no usable cached text: decompress the chain's base version
            last = self.length(base)
            text = decompress(data[:last])

        # decompress each stored delta on the chain, then apply them all
        bins = []
        for r in xrange(base + 1, rev + 1):
            s = self.length(r)
            bins.append(decompress(data[last:last + s]))
            last = last + s

        text = mdiff.patches(text, bins)

        if node != hash(text, p1, p2):
            raise IOError("integrity check failed on %s:%d"
                          % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text
269
269
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """Add a revision containing *text* to the log.

        link is the changelog revision to tie this entry to; p1/p2
        default to (tip, nullid).  d, when given, is a precomputed delta
        against the current tip, letting the caller skip the
        revision()/diff() work.  Returns the node id of the new (or
        already-present) revision.
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        # same text with same parents hashes to a node we already have
        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                # no precomputed delta supplied: diff against current tip
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            dist = end - start + len(data)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        # index entry: offset, length, base, linkrev, p1, p2, nodeid
        e = (offset, len(data), base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        # register the pre-append sizes with the transaction before
        # writing (presumably so rollback can truncate — confirm)
        transaction.add(self.datafile, e[0])
        self.opener(self.datafile, "a").write(data)
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node
317
318
318 def ancestor(self, a, b):
319 def ancestor(self, a, b):
319 # calculate the distance of every node from root
320 # calculate the distance of every node from root
320 dist = {nullid: 0}
321 dist = {nullid: 0}
321 for i in xrange(self.count()):
322 for i in xrange(self.count()):
322 n = self.node(i)
323 n = self.node(i)
323 p1, p2 = self.parents(n)
324 p1, p2 = self.parents(n)
324 dist[n] = max(dist[p1], dist[p2]) + 1
325 dist[n] = max(dist[p1], dist[p2]) + 1
325
326
326 # traverse ancestors in order of decreasing distance from root
327 # traverse ancestors in order of decreasing distance from root
327 def ancestors(node):
328 def ancestors(node):
328 # we store negative distances because heap returns smallest member
329 # we store negative distances because heap returns smallest member
329 h = [(-dist[node], node)]
330 h = [(-dist[node], node)]
330 seen = {}
331 seen = {}
331 earliest = self.count()
332 earliest = self.count()
332 while h:
333 while h:
333 d, n = heapq.heappop(h)
334 d, n = heapq.heappop(h)
334 if n not in seen:
335 if n not in seen:
335 seen[n] = 1
336 seen[n] = 1
336 r = self.rev(n)
337 r = self.rev(n)
337 yield (-d, r, n)
338 yield (-d, r, n)
338 for p in self.parents(n):
339 for p in self.parents(n):
339 heapq.heappush(h, (-dist[p], p))
340 heapq.heappush(h, (-dist[p], p))
340
341
341 x = ancestors(a)
342 x = ancestors(a)
342 y = ancestors(b)
343 y = ancestors(b)
343 lx = x.next()
344 lx = x.next()
344 ly = y.next()
345 ly = y.next()
345
346
346 # increment each ancestor list until it is closer to root than
347 # increment each ancestor list until it is closer to root than
347 # the other, or they match
348 # the other, or they match
348 while 1:
349 while 1:
349 if lx == ly:
350 if lx == ly:
350 return lx[2]
351 return lx[2]
351 elif lx < ly:
352 elif lx < ly:
352 ly = y.next()
353 ly = y.next()
353 elif lx > ly:
354 elif lx > ly:
354 lx = x.next()
355 lx = x.next()
355
356
356 def group(self, linkmap):
357 def group(self, linkmap):
357 # given a list of changeset revs, return a set of deltas and
358 # given a list of changeset revs, return a set of deltas and
358 # metadata corresponding to nodes. the first delta is
359 # metadata corresponding to nodes. the first delta is
359 # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
360 # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
360 # have this parent as it has all history before these
361 # have this parent as it has all history before these
361 # changesets. parent is parent[0]
362 # changesets. parent is parent[0]
362
363
363 revs = []
364 revs = []
364 needed = {}
365 needed = {}
365
366
366 # find file nodes/revs that match changeset revs
367 # find file nodes/revs that match changeset revs
367 for i in xrange(0, self.count()):
368 for i in xrange(0, self.count()):
368 if self.index[i][3] in linkmap:
369 if self.index[i][3] in linkmap:
369 revs.append(i)
370 revs.append(i)
370 needed[i] = 1
371 needed[i] = 1
371
372
372 # if we don't have any revisions touched by these changesets, bail
373 # if we don't have any revisions touched by these changesets, bail
373 if not revs:
374 if not revs:
374 yield struct.pack(">l", 0)
375 yield struct.pack(">l", 0)
375 return
376 return
376
377
377 # add the parent of the first rev
378 # add the parent of the first rev
378 p = self.parents(self.node(revs[0]))[0]
379 p = self.parents(self.node(revs[0]))[0]
379 revs.insert(0, self.rev(p))
380 revs.insert(0, self.rev(p))
380
381
381 # for each delta that isn't contiguous in the log, we need to
382 # for each delta that isn't contiguous in the log, we need to
382 # reconstruct the base, reconstruct the result, and then
383 # reconstruct the base, reconstruct the result, and then
383 # calculate the delta. We also need to do this where we've
384 # calculate the delta. We also need to do this where we've
384 # stored a full version and not a delta
385 # stored a full version and not a delta
385 for i in xrange(0, len(revs) - 1):
386 for i in xrange(0, len(revs) - 1):
386 a, b = revs[i], revs[i + 1]
387 a, b = revs[i], revs[i + 1]
387 if a + 1 != b or self.base(b) == b:
388 if a + 1 != b or self.base(b) == b:
388 for j in xrange(self.base(a), a + 1):
389 for j in xrange(self.base(a), a + 1):
389 needed[j] = 1
390 needed[j] = 1
390 for j in xrange(self.base(b), b + 1):
391 for j in xrange(self.base(b), b + 1):
391 needed[j] = 1
392 needed[j] = 1
392
393
393 # calculate spans to retrieve from datafile
394 # calculate spans to retrieve from datafile
394 needed = needed.keys()
395 needed = needed.keys()
395 needed.sort()
396 needed.sort()
396 spans = []
397 spans = []
397 oo = -1
398 oo = -1
398 ol = 0
399 ol = 0
399 for n in needed:
400 for n in needed:
400 if n < 0: continue
401 if n < 0: continue
401 o = self.start(n)
402 o = self.start(n)
402 l = self.length(n)
403 l = self.length(n)
403 if oo + ol == o: # can we merge with the previous?
404 if oo + ol == o: # can we merge with the previous?
404 nl = spans[-1][2]
405 nl = spans[-1][2]
405 nl.append((n, l))
406 nl.append((n, l))
406 ol += l
407 ol += l
407 spans[-1] = (oo, ol, nl)
408 spans[-1] = (oo, ol, nl)
408 else:
409 else:
409 oo = o
410 oo = o
410 ol = l
411 ol = l
411 spans.append((oo, ol, [(n, l)]))
412 spans.append((oo, ol, [(n, l)]))
412
413
413 # read spans in, divide up chunks
414 # read spans in, divide up chunks
414 chunks = {}
415 chunks = {}
415 for span in spans:
416 for span in spans:
416 # we reopen the file for each span to make http happy for now
417 # we reopen the file for each span to make http happy for now
417 f = self.opener(self.datafile)
418 f = self.opener(self.datafile)
418 f.seek(span[0])
419 f.seek(span[0])
419 data = f.read(span[1])
420 data = f.read(span[1])
420
421
421 # divide up the span
422 # divide up the span
422 pos = 0
423 pos = 0
423 for r, l in span[2]:
424 for r, l in span[2]:
424 chunks[r] = decompress(data[pos: pos + l])
425 chunks[r] = decompress(data[pos: pos + l])
425 pos += l
426 pos += l
426
427
427 # helper to reconstruct intermediate versions
428 # helper to reconstruct intermediate versions
428 def construct(text, base, rev):
429 def construct(text, base, rev):
429 bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
430 bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
430 return mdiff.patches(text, bins)
431 return mdiff.patches(text, bins)
431
432
432 # build deltas
433 # build deltas
433 deltas = []
434 deltas = []
434 for d in xrange(0, len(revs) - 1):
435 for d in xrange(0, len(revs) - 1):
435 a, b = revs[d], revs[d + 1]
436 a, b = revs[d], revs[d + 1]
436 n = self.node(b)
437 n = self.node(b)
437
438
438 # do we need to construct a new delta?
439 # do we need to construct a new delta?
439 if a + 1 != b or self.base(b) == b:
440 if a + 1 != b or self.base(b) == b:
440 if a >= 0:
441 if a >= 0:
441 base = self.base(a)
442 base = self.base(a)
442 ta = chunks[self.base(a)]
443 ta = chunks[self.base(a)]
443 ta = construct(ta, base, a)
444 ta = construct(ta, base, a)
444 else:
445 else:
445 ta = ""
446 ta = ""
446
447
447 base = self.base(b)
448 base = self.base(b)
448 if a > base:
449 if a > base:
449 base = a
450 base = a
450 tb = ta
451 tb = ta
451 else:
452 else:
452 tb = chunks[self.base(b)]
453 tb = chunks[self.base(b)]
453 tb = construct(tb, base, b)
454 tb = construct(tb, base, b)
454 d = self.diff(ta, tb)
455 d = self.diff(ta, tb)
455 else:
456 else:
456 d = chunks[b]
457 d = chunks[b]
457
458
458 p = self.parents(n)
459 p = self.parents(n)
459 meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
460 meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
460 l = struct.pack(">l", len(meta) + len(d) + 4)
461 l = struct.pack(">l", len(meta) + len(d) + 4)
461 yield l
462 yield l
462 yield meta
463 yield meta
463 yield d
464 yield d
464
465
465 yield struct.pack(">l", 0)
466 yield struct.pack(">l", 0)
466
467
    def addgroup(self, revs, linkmapper, transaction, unique = 0):
        """Add a group of deltas (as produced by group()) to this revlog.

        revs yields chunks of 80 bytes of metadata (node, p1, p2,
        changeset node) followed by delta data; linkmapper maps a
        changeset node to a link revision.  When unique is set, a node
        we already have is an error instead of being skipped.  Returns
        the last node added.
        """
        # given a set of deltas, add them to the revision log. the
        # first delta is against its parent, which should be in our
        # log, the rest are against the previous delta.

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

        # NOTE(review): 'base' is assigned here and below but never read
        # afterwards — looks dead; verify before relying on it
        base = prev = -1
        start = end = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        # register the pre-append file sizes with the transaction
        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                if unique:
                    raise "already have %s" % hex(node[:4])
                continue
            delta = chunk[80:]

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise "unknown base %s" % short(chain[:4])

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            # cdelta is only defined on the chain == prev path; the
            # 'or' below short-circuits before reading it otherwise
            if chain == prev:
                cdelta = compress(delta)

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise "consistency error adding group"
                measure = len(text)
            else:
                # append the compressed delta directly to the chain
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node
General Comments 0
You need to be logged in to leave comments. Login now