Merge with upstream
Thomas Arendsen Hein
r1538:482b4efd merge default
@@ -2429,14 +2429,7 b' def parse(ui, args):'
2429 cmd, args = args[0], args[1:]
2429 cmd, args = args[0], args[1:]
2430 defaults = ui.config("defaults", cmd)
2430 defaults = ui.config("defaults", cmd)
2431 if defaults:
2431 if defaults:
2432 # reparse with command defaults added
2432 args = defaults.split() + args
2433 args = [cmd] + defaults.split() + args
2434 try:
2435 args = fancyopts.fancyopts(args, globalopts, options)
2436 except fancyopts.getopt.GetoptError, inst:
2437 raise ParseError(None, inst)
2438
2439 cmd, args = args[0], args[1:]
2440
2433
2441 aliases, i = find(cmd)
2434 aliases, i = find(cmd)
2442 cmd = aliases[0]
2435 cmd = aliases[0]
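
The first hunk simplifies how per-command defaults from the [defaults] config section are applied: instead of rebuilding the full argument vector and re-running the global option parser, the configured defaults are now simply prepended to the command's own argument list. A minimal sketch of that behaviour, with a plain dict standing in for ui.config (names and values here are illustrative, not Mercurial's real API):

def apply_command_defaults(cmd, args, defaults_config):
    # defaults_config maps a command name to a string of default arguments,
    # e.g. {"log": "-v"}; this mirrors reading the [defaults] section.
    defaults = defaults_config.get(cmd)
    if defaults:
        # prepend command defaults, no reparse of global options needed
        args = defaults.split() + args
    return args

# usage sketch
print(apply_command_defaults("log", ["foo.txt"], {"log": "-v"}))
# prints: ['-v', 'foo.txt']
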
@@ -101,16 +101,15 b' class dirstate:'
101 try:
101 try:
102 return self.map[key]
102 return self.map[key]
103 except TypeError:
103 except TypeError:
104 self.read()
104 self.lazyread()
105 return self[key]
105 return self[key]
106
106
107 def __contains__(self, key):
107 def __contains__(self, key):
108 if not self.map: self.read()
108 self.lazyread()
109 return key in self.map
109 return key in self.map
110
110
111 def parents(self):
111 def parents(self):
112 if not self.pl:
112 self.lazyread()
113 self.read()
114 return self.pl
113 return self.pl
115
114
116 def markdirty(self):
115 def markdirty(self):
@@ -118,8 +117,7 b' class dirstate:'
118 self.dirty = 1
117 self.dirty = 1
119
118
120 def setparents(self, p1, p2=nullid):
119 def setparents(self, p1, p2=nullid):
121 if not self.pl:
120 self.lazyread()
122 self.read()
123 self.markdirty()
121 self.markdirty()
124 self.pl = p1, p2
122 self.pl = p1, p2
125
123
@@ -129,9 +127,11 b' class dirstate:'
129 except KeyError:
127 except KeyError:
130 return "?"
128 return "?"
131
129
130 def lazyread(self):
131 if self.map is None:
132 self.read()
133
132 def read(self):
134 def read(self):
133 if self.map is not None: return self.map
134
135 self.map = {}
135 self.map = {}
136 self.pl = [nullid, nullid]
136 self.pl = [nullid, nullid]
137 try:
137 try:
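
The dirstate hunks above replace the scattered "if not self.map: self.read()" and "if not self.pl: self.read()" checks with a single lazyread() helper, so the dirstate file is parsed at most once and only when first needed. A rough sketch of the same lazy-initialization pattern (a simplified stand-in, not the real dirstate file format):

class LazyState:
    """Load state from disk only on first access."""

    def __init__(self, path):
        self.path = path
        self.map = None              # None means "not read yet"

    def lazyread(self):
        # safe to call from every accessor; reads at most once
        if self.map is None:
            self.read()

    def read(self):
        self.map = {}
        try:
            with open(self.path) as fp:
                for line in fp:
                    key, _, value = line.rstrip("\n").partition("\t")
                    self.map[key] = value
        except FileNotFoundError:
            pass                     # no file yet: start empty

    def __contains__(self, key):
        self.lazyread()
        return key in self.map

    def __getitem__(self, key):
        self.lazyread()
        return self.map[key]
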
@@ -154,7 +154,7 b' class dirstate:'
154 pos += l
154 pos += l
155
155
156 def copy(self, source, dest):
156 def copy(self, source, dest):
157 self.read()
157 self.lazyread()
158 self.markdirty()
158 self.markdirty()
159 self.copies[dest] = source
159 self.copies[dest] = source
160
160
@@ -169,7 +169,7 b' class dirstate:'
169 a marked for addition'''
169 a marked for addition'''
170
170
171 if not files: return
171 if not files: return
172 self.read()
172 self.lazyread()
173 self.markdirty()
173 self.markdirty()
174 for f in files:
174 for f in files:
175 if state == "r":
175 if state == "r":
@@ -184,7 +184,7 b' class dirstate:'
184
184
185 def forget(self, files):
185 def forget(self, files):
186 if not files: return
186 if not files: return
187 self.read()
187 self.lazyread()
188 self.markdirty()
188 self.markdirty()
189 for f in files:
189 for f in files:
190 try:
190 try:
@@ -198,7 +198,7 b' class dirstate:'
198 self.markdirty()
198 self.markdirty()
199
199
200 def write(self):
200 def write(self):
201 st = self.opener("dirstate", "w")
201 st = self.opener("dirstate", "w", atomic=True)
202 st.write("".join(self.pl))
202 st.write("".join(self.pl))
203 for f, e in self.map.items():
203 for f, e in self.map.items():
204 c = self.copied(f)
204 c = self.copied(f)
@@ -241,7 +241,7 b' class dirstate:'
241 bs += 1
241 bs += 1
242 return ret
242 return ret
243
243
244 def supported_type(self, f, st, verbose=True):
244 def supported_type(self, f, st, verbose=False):
245 if stat.S_ISREG(st.st_mode):
245 if stat.S_ISREG(st.st_mode):
246 return True
246 return True
247 if verbose:
247 if verbose:
@@ -258,7 +258,7 b' class dirstate:'
258 return False
258 return False
259
259
260 def statwalk(self, files=None, match=util.always, dc=None):
260 def statwalk(self, files=None, match=util.always, dc=None):
261 self.read()
261 self.lazyread()
262
262
263 # walk all files by default
263 # walk all files by default
264 if not files:
264 if not files:
@@ -352,7 +352,7 b' class dirstate:'
352 continue
352 continue
353 self.blockignore = True
353 self.blockignore = True
354 if statmatch(ff, st):
354 if statmatch(ff, st):
355 if self.supported_type(ff, st):
355 if self.supported_type(ff, st, verbose=True):
356 yield 'f', ff, st
356 yield 'f', ff, st
357 elif ff in dc:
357 elif ff in dc:
358 yield 'm', ff, st
358 yield 'm', ff, st
@@ -232,13 +232,13 b' class localrepository:'
232 return False
232 return False
233
233
234 def undo(self):
234 def undo(self):
235 wlock = self.wlock()
235 lock = self.lock()
236 lock = self.lock()
236 if os.path.exists(self.join("undo")):
237 if os.path.exists(self.join("undo")):
237 self.ui.status(_("rolling back last transaction\n"))
238 self.ui.status(_("rolling back last transaction\n"))
238 transaction.rollback(self.opener, self.join("undo"))
239 transaction.rollback(self.opener, self.join("undo"))
239 self.dirstate = None
240 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
240 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
241 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
241 self.dirstate.read()
242 else:
242 else:
243 self.ui.warn(_("no undo information available\n"))
243 self.ui.warn(_("no undo information available\n"))
244
244
@@ -251,6 +251,17 b' class localrepository:'
251 return lock.lock(self.join("lock"), wait)
251 return lock.lock(self.join("lock"), wait)
252 raise inst
252 raise inst
253
253
254 def wlock(self, wait=1):
255 try:
256 wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
257 except lock.LockHeld, inst:
258 if not wait:
259 raise inst
260 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
261 wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
262 self.dirstate.read()
263 return wlock
264
254 def rawcommit(self, files, text, user, date, p1=None, p2=None):
265 def rawcommit(self, files, text, user, date, p1=None, p2=None):
255 orig_parent = self.dirstate.parents()[0] or nullid
266 orig_parent = self.dirstate.parents()[0] or nullid
256 p1 = p1 or self.dirstate.parents()[0] or nullid
267 p1 = p1 or self.dirstate.parents()[0] or nullid
@@ -267,6 +278,8 b' class localrepository:'
267 else:
278 else:
268 update_dirstate = 0
279 update_dirstate = 0
269
280
281 wlock = self.wlock()
282 lock = self.lock()
270 tr = self.transaction()
283 tr = self.transaction()
271 mm = m1.copy()
284 mm = m1.copy()
272 mfm = mf1.copy()
285 mfm = mf1.copy()
@@ -355,6 +368,7 b' class localrepository:'
355 if not self.hook("precommit"):
368 if not self.hook("precommit"):
356 return None
369 return None
357
370
371 wlock = self.wlock()
358 lock = self.lock()
372 lock = self.lock()
359 tr = self.transaction()
373 tr = self.transaction()
360
374
@@ -472,6 +486,10 b' class localrepository:'
472
486
473 # are we comparing the working directory?
487 # are we comparing the working directory?
474 if not node2:
488 if not node2:
489 try:
490 wlock = self.wlock(wait=0)
491 except lock.LockHeld:
492 wlock = None
475 l, c, a, d, u = self.dirstate.changes(files, match)
493 l, c, a, d, u = self.dirstate.changes(files, match)
476
494
477 # are we comparing working dir against its parent?
495 # are we comparing working dir against its parent?
@@ -483,6 +501,8 b' class localrepository:'
483 for f in l:
501 for f in l:
484 if fcmp(f, mf2):
502 if fcmp(f, mf2):
485 c.append(f)
503 c.append(f)
504 elif wlock is not None:
505 self.dirstate.update([f], "n")
486
506
487 for l in c, a, d, u:
507 for l in c, a, d, u:
488 l.sort()
508 l.sort()
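
In the status path above, the working-directory lock is requested with wait=0: if another process already holds it, wlock stays None and the dirstate entries of files that turned out to be unchanged are simply not refreshed, so a read-only status query never blocks. A small self-contained sketch of that opportunistic pattern, using an O_EXCL lock file as a stand-in for hg's lock:

import os

def trylock(path):
    """Return a lock token, or None if somebody else holds the lock."""
    try:
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except FileExistsError:
        return None                  # lock held elsewhere: degrade gracefully
    os.close(fd)
    return path

def unlock(token):
    if token is not None:
        os.unlink(token)

def refresh_clean_entries(cache, clean_files, lockpath):
    token = trylock(lockpath)
    if token is None:
        return                       # somebody else is writing; skip the refresh
    try:
        for f in clean_files:
            cache[f] = "n"           # mark as known-clean, like dirstate.update([f], "n")
    finally:
        unlock(token)
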
@@ -526,6 +546,7 b' class localrepository:'
526 return (c, a, d, u)
546 return (c, a, d, u)
527
547
528 def add(self, list):
548 def add(self, list):
549 wlock = self.wlock()
529 for f in list:
550 for f in list:
530 p = self.wjoin(f)
551 p = self.wjoin(f)
531 if not os.path.exists(p):
552 if not os.path.exists(p):
@@ -538,6 +559,7 b' class localrepository:'
538 self.dirstate.update([f], "a")
559 self.dirstate.update([f], "a")
539
560
540 def forget(self, list):
561 def forget(self, list):
562 wlock = self.wlock()
541 for f in list:
563 for f in list:
542 if self.dirstate.state(f) not in 'ai':
564 if self.dirstate.state(f) not in 'ai':
543 self.ui.warn(_("%s not added!\n") % f)
565 self.ui.warn(_("%s not added!\n") % f)
@@ -551,6 +573,7 b' class localrepository:'
551 util.unlink(self.wjoin(f))
573 util.unlink(self.wjoin(f))
552 except OSError, inst:
574 except OSError, inst:
553 if inst.errno != errno.ENOENT: raise
575 if inst.errno != errno.ENOENT: raise
576 wlock = self.wlock()
554 for f in list:
577 for f in list:
555 p = self.wjoin(f)
578 p = self.wjoin(f)
556 if os.path.exists(p):
579 if os.path.exists(p):
@@ -568,6 +591,7 b' class localrepository:'
568 mn = self.changelog.read(p)[0]
591 mn = self.changelog.read(p)[0]
569 mf = self.manifest.readflags(mn)
592 mf = self.manifest.readflags(mn)
570 m = self.manifest.read(mn)
593 m = self.manifest.read(mn)
594 wlock = self.wlock()
571 for f in list:
595 for f in list:
572 if self.dirstate.state(f) not in "r":
596 if self.dirstate.state(f) not in "r":
573 self.ui.warn("%s not removed!\n" % f)
597 self.ui.warn("%s not removed!\n" % f)
@@ -584,6 +608,7 b' class localrepository:'
584 elif not os.path.isfile(p):
608 elif not os.path.isfile(p):
585 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
609 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
586 else:
610 else:
611 wlock = self.wlock()
587 if self.dirstate.state(dest) == '?':
612 if self.dirstate.state(dest) == '?':
588 self.dirstate.update([dest], "a")
613 self.dirstate.update([dest], "a")
589 self.dirstate.copy(source, dest)
614 self.dirstate.copy(source, dest)
@@ -1374,6 +1399,9 b' class localrepository:'
1374 mw[f] = ""
1399 mw[f] = ""
1375 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1400 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1376
1401
1402 if moddirstate:
1403 wlock = self.wlock()
1404
1377 for f in d:
1405 for f in d:
1378 if f in mw: del mw[f]
1406 if f in mw: del mw[f]
1379
1407
@@ -12,10 +12,11 b' class LockHeld(Exception):'
12 pass
12 pass
13
13
14 class lock:
14 class lock:
15 def __init__(self, file, wait=1):
15 def __init__(self, file, wait=1, releasefn=None):
16 self.f = file
16 self.f = file
17 self.held = 0
17 self.held = 0
18 self.wait = wait
18 self.wait = wait
19 self.releasefn = releasefn
19 self.lock()
20 self.lock()
20
21
21 def __del__(self):
22 def __del__(self):
@@ -43,6 +44,8 b' class lock:'
43 def release(self):
44 def release(self):
44 if self.held:
45 if self.held:
45 self.held = 0
46 self.held = 0
47 if self.releasefn:
48 self.releasefn()
46 try:
49 try:
47 os.unlink(self.f)
50 os.unlink(self.f)
48 except: pass
51 except: pass
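
The lock class gains an optional releasefn callback that runs when the lock is released; this is what lets localrepository.wlock() tie self.dirstate.write to the working-directory lock, so whoever drops the lock flushes the dirstate first. A condensed, runnable sketch of that mechanism (an exclusive lock file plus a release callback; details such as the lock-file payload and stale-lock handling are omitted):

import os

class FileLock:
    def __init__(self, path, releasefn=None):
        self.path = path
        self.releasefn = releasefn
        self.held = False
        # O_EXCL makes creation fail if the lock file already exists
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        os.close(fd)
        self.held = True

    def release(self):
        if self.held:
            self.held = False
            if self.releasefn:
                self.releasefn()     # e.g. flush in-memory dirstate to disk
            try:
                os.unlink(self.path)
            except OSError:
                pass

    def __del__(self):
        self.release()

# usage sketch: flush cached state whenever the lock is dropped
state = {"dirty": True}
def write_state():
    state["dirty"] = False           # stand-in for dirstate.write()

l = FileLock("demo.wlock", releasefn=write_state)
l.release()
assert state["dirty"] is False
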
@@ -9,13 +9,12 b' import sys, struct'
9 from revlog import *
9 from revlog import *
10 from i18n import gettext as _
10 from i18n import gettext as _
11 from demandload import *
11 from demandload import *
12 demandload(globals(), "bisect")
12 demandload(globals(), "bisect array")
13
13
14 class manifest(revlog):
14 class manifest(revlog):
15 def __init__(self, opener):
15 def __init__(self, opener):
16 self.mapcache = None
16 self.mapcache = None
17 self.listcache = None
17 self.listcache = None
18 self.addlist = None
19 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
18 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
20
19
21 def read(self, node):
20 def read(self, node):
@@ -25,8 +24,9 b' class manifest(revlog):'
25 text = self.revision(node)
24 text = self.revision(node)
26 map = {}
25 map = {}
27 flag = {}
26 flag = {}
28 self.listcache = (text, text.splitlines(1))
27 self.listcache = array.array('c', text)
29 for l in self.listcache[1]:
28 lines = text.splitlines(1)
29 for l in lines:
30 (f, n) = l.split('\0')
30 (f, n) = l.split('\0')
31 map[f] = bin(n[:40])
31 map[f] = bin(n[:40])
32 flag[f] = (n[40:-1] == "x")
32 flag[f] = (n[40:-1] == "x")
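
The manifest.read() change keeps listcache as a character array of the raw manifest text rather than a (text, lines) pair; each manifest line is still the file name, a NUL byte, forty hex digits of the file node, an optional "x" flag, and a newline. A small sketch of parsing and rebuilding that line format (simplified, using bytes instead of Python 2 strings):

from binascii import unhexlify, hexlify

def parse_manifest(text):
    """Parse b"name\\0<40 hex chars>[x]\\n" lines into (filenodes, flags)."""
    filenodes, flags = {}, {}
    for line in text.splitlines(keepends=True):
        f, n = line.split(b"\0")
        filenodes[f] = unhexlify(n[:40])      # 20-byte binary node id
        flags[f] = (n[40:-1] == b"x")         # executable bit
    return filenodes, flags

def unparse_manifest(filenodes, flags):
    return b"".join(
        f + b"\0" + hexlify(n) + (b"x" if flags[f] else b"") + b"\n"
        for f, n in sorted(filenodes.items()))

# round-trip check
m = b"a.c\x00" + b"ab" * 20 + b"\n" + b"bin/tool\x00" + b"cd" * 20 + b"x\n"
fn, fl = parse_manifest(m)
assert unparse_manifest(fn, fl) == m
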
@@ -39,57 +39,67 b' class manifest(revlog):'
39 self.read(node)
39 self.read(node)
40 return self.mapcache[2]
40 return self.mapcache[2]
41
41
42 def diff(self, a, b):
43 return mdiff.textdiff(str(a), str(b))
44
42 def add(self, map, flags, transaction, link, p1=None, p2=None,
45 def add(self, map, flags, transaction, link, p1=None, p2=None,
43 changed=None):
46 changed=None):
44 # directly generate the mdiff delta from the data collected during
47
45 # the bisect loop below
48 # returns a tuple (start, end). If the string is found
46 def gendelta(delta):
49 # m[start:end] are the line containing that string. If start == end
47 i = 0
50 # the string was not found and they indicate the proper sorted
48 result = []
51 # insertion point. This was taken from bisect_left, and modified
49 while i < len(delta):
52 # to find line start/end as it goes along.
50 start = delta[i][2]
53 #
51 end = delta[i][3]
54 # m should be a buffer or a string
52 l = delta[i][4]
55 # s is a string
53 if l == None:
56 #
54 l = ""
57 def manifestsearch(m, s, lo=0, hi=None):
55 while i < len(delta) - 1 and start <= delta[i+1][2] \
58 def advance(i, c):
56 and end >= delta[i+1][2]:
59 while i < lenm and m[i] != c:
57 if delta[i+1][3] > end:
58 end = delta[i+1][3]
59 if delta[i+1][4]:
60 l += delta[i+1][4]
61 i += 1
60 i += 1
62 result.append(struct.pack(">lll", start, end, len(l)) + l)
61 return i
63 i += 1
62 lenm = len(m)
64 return result
63 if not hi:
64 hi = lenm
65 while lo < hi:
66 mid = (lo + hi) // 2
67 start = mid
68 while start > 0 and m[start-1] != '\n':
69 start -= 1
70 end = advance(start, '\0')
71 if m[start:end] < s:
72 # we know that after the null there are 40 bytes of sha1
73 # this translates to the bisect lo = mid + 1
74 lo = advance(end + 40, '\n') + 1
75 else:
76 # this translates to the bisect hi = mid
77 hi = start
78 end = advance(lo, '\0')
79 found = m[lo:end]
80 if cmp(s, found) == 0:
81 # we know that after the null there are 40 bytes of sha1
82 end = advance(end + 40, '\n')
83 return (lo, end+1)
84 else:
85 return (lo, lo)
65
86
66 # apply the changes collected during the bisect loop to our addlist
87 # apply the changes collected during the bisect loop to our addlist
67 def addlistdelta(addlist, delta):
88 # return a delta suitable for addrevision
68 # apply the deltas to the addlist. start from the bottom up
89 def addlistdelta(addlist, x):
90 # start from the bottom up
69 # so changes to the offsets don't mess things up.
91 # so changes to the offsets don't mess things up.
70 i = len(delta)
92 i = len(x)
71 while i > 0:
93 while i > 0:
72 i -= 1
94 i -= 1
73 start = delta[i][0]
95 start = x[i][0]
74 end = delta[i][1]
96 end = x[i][1]
75 if delta[i][4]:
97 if x[i][2]:
76 addlist[start:end] = [delta[i][4]]
98 addlist[start:end] = array.array('c', x[i][2])
77 else:
99 else:
78 del addlist[start:end]
100 del addlist[start:end]
79 return addlist
101 return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
80
102 for d in x ])
81 # calculate the byte offset of the start of each line in the
82 # manifest
83 def calcoffsets(addlist):
84 offsets = [0] * (len(addlist) + 1)
85 offset = 0
86 i = 0
87 while i < len(addlist):
88 offsets[i] = offset
89 offset += len(addlist[i])
90 i += 1
91 offsets[i] = offset
92 return offsets
93
103
94 # if we're using the listcache, make sure it is valid and
104 # if we're using the listcache, make sure it is valid and
95 # parented by the same node we're diffing against
105 # parented by the same node we're diffing against
@@ -98,15 +108,13 b' class manifest(revlog):'
98 files = map.keys()
108 files = map.keys()
99 files.sort()
109 files.sort()
100
110
101 self.addlist = ["%s\000%s%s\n" %
111 text = ["%s\000%s%s\n" %
102 (f, hex(map[f]), flags[f] and "x" or '')
112 (f, hex(map[f]), flags[f] and "x" or '')
103 for f in files]
113 for f in files]
114 self.listcache = array.array('c', "".join(text))
104 cachedelta = None
115 cachedelta = None
105 else:
116 else:
106 addlist = self.listcache[1]
117 addlist = self.listcache
107
108 # find the starting offset for each line in the add list
109 offsets = calcoffsets(addlist)
110
118
111 # combine the changed lists into one list for sorting
119 # combine the changed lists into one list for sorting
112 work = [[x, 0] for x in changed[0]]
120 work = [[x, 0] for x in changed[0]]
@@ -114,45 +122,52 b' class manifest(revlog):'
114 work.sort()
122 work.sort()
115
123
116 delta = []
124 delta = []
117 bs = 0
125 dstart = None
126 dend = None
127 dline = [""]
128 start = 0
129 # zero copy representation of addlist as a buffer
130 addbuf = buffer(addlist)
118
131
132 # start with a readonly loop that finds the offset of
133 # each line and creates the deltas
119 for w in work:
134 for w in work:
120 f = w[0]
135 f = w[0]
121 # bs will either be the index of the item or the insert point
136 # bs will either be the index of the item or the insert point
122 bs = bisect.bisect(addlist, f, bs)
137 start, end = manifestsearch(addbuf, f, start)
123 if bs < len(addlist):
124 fn = addlist[bs][:addlist[bs].index('\0')]
125 else:
126 fn = None
127 if w[1] == 0:
138 if w[1] == 0:
128 l = "%s\000%s%s\n" % (f, hex(map[f]),
139 l = "%s\000%s%s\n" % (f, hex(map[f]),
129 flags[f] and "x" or '')
140 flags[f] and "x" or '')
130 else:
141 else:
131 l = None
142 l = ""
132 start = bs
143 if start == end and w[1] == 1:
133 if fn != f:
144 # item we want to delete was not found, error out
134 # item not found, insert a new one
145 raise AssertionError(
135 end = bs
136 if w[1] == 1:
137 raise AssertionError(
138 _("failed to remove %s from manifest\n") % f)
146 _("failed to remove %s from manifest\n") % f)
147 if dstart != None and dstart <= start and dend >= start:
148 if dend < end:
149 dend = end
150 if l:
151 dline.append(l)
139 else:
152 else:
140 # item is found, replace/delete the existing line
153 if dstart != None:
141 end = bs + 1
154 delta.append([dstart, dend, "".join(dline)])
142 delta.append([start, end, offsets[start], offsets[end], l])
155 dstart = start
156 dend = end
157 dline = [l]
143
158
144 self.addlist = addlistdelta(addlist, delta)
159 if dstart != None:
145 if self.mapcache[0] == self.tip():
160 delta.append([dstart, dend, "".join(dline)])
146 cachedelta = "".join(gendelta(delta))
161 # apply the delta to the addlist, and get a delta for addrevision
147 else:
162 cachedelta = addlistdelta(addlist, delta)
148 cachedelta = None
149
163
150 text = "".join(self.addlist)
164 # the delta is only valid if we've been processing the tip revision
151 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
165 if self.mapcache[0] != self.tip():
152 raise AssertionError(_("manifest delta failure\n"))
166 cachedelta = None
153 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
167 self.listcache = addlist
168
169 n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
170 p2, cachedelta)
154 self.mapcache = (n, map, flags)
171 self.mapcache = (n, map, flags)
155 self.listcache = (text, self.addlist)
156 self.addlist = None
157
172
158 return n
173 return n
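
The rewritten manifest.add() no longer bisects over a Python list of lines; it keeps the manifest as one flat buffer and uses manifestsearch(), a bisect_left variant that snaps each probe to the start of a line and compares only the file name before the NUL separator, returning either the (start, end) span of the matching line or an empty span at the sorted insertion point. A standalone sketch of that search over a sorted, newline-terminated, NUL-delimited block (simplified: a plain str instead of a buffer over an array, and a generic value instead of the fixed 40-byte sha1):

def linesearch(m, s, lo=0, hi=None):
    """Find the line in m whose key (text before '\\0') equals s.

    m is a sorted block of "key\\0value\\n" lines; returns (start, end) of
    the matching line, or (pos, pos) giving the sorted insertion point.
    """
    def advance(i, c):
        # move i forward to the next occurrence of character c
        while i < lenm and m[i] != c:
            i += 1
        return i

    lenm = len(m)
    if hi is None:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        # snap the probe back to the beginning of its line
        while start > 0 and m[start - 1] != "\n":
            start -= 1
        end = advance(start, "\0")
        if m[start:end] < s:
            # skip past this whole line; the answer lies further right
            lo = advance(end + 1, "\n") + 1
        else:
            hi = start
    end = advance(lo, "\0")
    if m[lo:end] == s:
        return lo, advance(end + 1, "\n") + 1   # whole line, newline included
    return lo, lo

# usage sketch
buf = "a.c\x00" + "ab" * 20 + "\n" + "dir/b.o\x00" + "cd" * 20 + "\n"
print(linesearch(buf, "dir/b.o"))   # span of the matching line
print(linesearch(buf, "b.txt"))     # empty span at the insertion point
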
@@ -31,15 +31,15 b' def hash(text, p1, p2):'
31
31
32 def compress(text):
32 def compress(text):
33 """ generate a possibly-compressed representation of text """
33 """ generate a possibly-compressed representation of text """
34 if not text: return text
34 if not text: return ("", text)
35 if len(text) < 44:
35 if len(text) < 44:
36 if text[0] == '\0': return text
36 if text[0] == '\0': return ("", text)
37 return 'u' + text
37 return ('u', text)
38 bin = zlib.compress(text)
38 bin = zlib.compress(text)
39 if len(bin) > len(text):
39 if len(bin) > len(text):
40 if text[0] == '\0': return text
40 if text[0] == '\0': return ("", text)
41 return 'u' + text
41 return ('u', text)
42 return bin
42 return ("", bin)
43
43
44 def decompress(bin):
44 def decompress(bin):
45 """ decompress the given input """
45 """ decompress the given input """
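
compress() now returns a two-element tuple (prefix, data) instead of concatenating the one-byte 'u' marker with the text, so callers can write the marker and the (possibly large) payload separately without building yet another copy of the string. A sketch of the same convention on bytes (the length threshold matches the hunk; this is a simplification of revlog's storage format, not a drop-in replacement):

import zlib

def compress(text):
    """Return (prefix, data); prefix is b"" or b"u" (stored uncompressed)."""
    if not text:
        return (b"", text)
    if len(text) < 44:                       # too small to be worth compressing
        if text[:1] == b"\0":
            return (b"", text)
        return (b"u", text)
    bin = zlib.compress(text)
    if len(bin) > len(text):                 # compression did not help
        if text[:1] == b"\0":
            return (b"", text)
        return (b"u", text)
    return (b"", bin)

def decompress(data):
    if not data:
        return data
    if data[:1] == b"\0":
        return data
    if data[:1] == b"x":                     # zlib stream starts with 0x78 ('x')
        return zlib.decompress(data)
    if data[:1] == b"u":
        return data[1:]
    raise ValueError("unknown compression type %r" % data[:1])

# the stored length is now the sum of both tuple parts
prefix, data = compress(b"some revision text " * 10)
assert decompress(prefix + data) == b"some revision text " * 10
print(len(prefix) + len(data))
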
@@ -71,6 +71,9 b' class lazyparser:'
71 self.all = 0
71 self.all = 0
72 self.revlog = revlog
72 self.revlog = revlog
73
73
74 def trunc(self, pos):
75 self.l = pos/self.s
76
74 def load(self, pos=None):
77 def load(self, pos=None):
75 if self.all: return
78 if self.all: return
76 if pos is not None:
79 if pos is not None:
@@ -104,8 +107,12 b' class lazyindex:'
104 return self.p.index[pos]
107 return self.p.index[pos]
105 def __getitem__(self, pos):
108 def __getitem__(self, pos):
106 return self.p.index[pos] or self.load(pos)
109 return self.p.index[pos] or self.load(pos)
110 def __delitem__(self, pos):
111 del self.p.index[pos]
107 def append(self, e):
112 def append(self, e):
108 self.p.index.append(e)
113 self.p.index.append(e)
114 def trunc(self, pos):
115 self.p.trunc(pos)
109
116
110 class lazymap:
117 class lazymap:
111 """a lazy version of the node map"""
118 """a lazy version of the node map"""
@@ -140,6 +147,8 b' class lazymap:'
140 raise KeyError("node " + hex(key))
147 raise KeyError("node " + hex(key))
141 def __setitem__(self, key, val):
148 def __setitem__(self, key, val):
142 self.p.map[key] = val
149 self.p.map[key] = val
150 def __delitem__(self, key):
151 del self.p.map[key]
143
152
144 class RevlogError(Exception): pass
153 class RevlogError(Exception): pass
145
154
@@ -543,14 +552,16 b' class revlog:'
543 end = self.end(t)
552 end = self.end(t)
544 if not d:
553 if not d:
545 prev = self.revision(self.tip())
554 prev = self.revision(self.tip())
546 d = self.diff(prev, text)
555 d = self.diff(prev, str(text))
547 data = compress(d)
556 data = compress(d)
548 dist = end - start + len(data)
557 l = len(data[1]) + len(data[0])
558 dist = end - start + l
549
559
550 # full versions are inserted when the needed deltas
560 # full versions are inserted when the needed deltas
551 # become comparable to the uncompressed text
561 # become comparable to the uncompressed text
552 if not n or dist > len(text) * 2:
562 if not n or dist > len(text) * 2:
553 data = compress(text)
563 data = compress(text)
564 l = len(data[1]) + len(data[0])
554 base = n
565 base = n
555 else:
566 else:
556 base = self.base(t)
567 base = self.base(t)
@@ -559,14 +570,17 b' class revlog:'
559 if t >= 0:
570 if t >= 0:
560 offset = self.end(t)
571 offset = self.end(t)
561
572
562 e = (offset, len(data), base, link, p1, p2, node)
573 e = (offset, l, base, link, p1, p2, node)
563
574
564 self.index.append(e)
575 self.index.append(e)
565 self.nodemap[node] = n
576 self.nodemap[node] = n
566 entry = struct.pack(indexformat, *e)
577 entry = struct.pack(indexformat, *e)
567
578
568 transaction.add(self.datafile, e[0])
579 transaction.add(self.datafile, e[0])
569 self.opener(self.datafile, "a").write(data)
580 f = self.opener(self.datafile, "a")
581 if data[0]:
582 f.write(data[0])
583 f.write(data[1])
570 transaction.add(self.indexfile, n * len(entry))
584 transaction.add(self.indexfile, n * len(entry))
571 self.opener(self.indexfile, "a").write(entry)
585 self.opener(self.indexfile, "a").write(entry)
572
586
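
With compress() returning a tuple, addrevision() computes the stored length as len(data[0]) + len(data[1]) and still applies the same rule for giving up on deltas: once the accumulated chain (end - start plus the new delta's size) grows past twice the size of the plain text, a full version is stored and becomes a new base. A toy illustration of that size decision only (the offsets and payloads below are made up for the example):

def choose_storage(chain_start, chain_end, delta, full_text):
    """Decide whether to store a delta or a fresh full revision.

    chain_start/chain_end are the data-file offsets covered by the current
    delta chain; deltas are only worthwhile while the whole chain stays
    under twice the size of the uncompressed text.
    """
    dist = chain_end - chain_start + len(delta)
    if dist > 2 * len(full_text):
        return ("full", full_text)       # start a new base revision
    return ("delta", delta)

# usage sketch: a long chain of small deltas eventually forces a full version
print(choose_storage(0, 100, b"@@ small delta", b"x" * 400))   # ('delta', ...)
print(choose_storage(0, 900, b"@@ small delta", b"x" * 400))   # ('full', ...)
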
@@ -801,7 +815,8 b' class revlog:'
801 # current size.
815 # current size.
802
816
803 if chain == prev:
817 if chain == prev:
804 cdelta = compress(delta)
818 tempd = compress(delta)
819 cdelta = tempd[0] + tempd[1]
805
820
806 if chain != prev or (end - start + len(cdelta)) > measure * 2:
821 if chain != prev or (end - start + len(cdelta)) > measure * 2:
807 # flush our writes here so we can read it in revision
822 # flush our writes here so we can read it in revision
@@ -828,6 +843,36 b' class revlog:'
828 ifh.close()
843 ifh.close()
829 return node
844 return node
830
845
846 def strip(self, rev, minlink):
847 if self.count() == 0 or rev >= self.count():
848 return
849
850 # When stripping away a revision, we need to make sure it
851 # does not actually belong to an older changeset.
852 # The minlink parameter defines the oldest revision
853 # we're allowed to strip away.
854 while minlink > self.index[rev][3]:
855 rev += 1
856 if rev >= self.count():
857 return
858
859 # first truncate the files on disk
860 end = self.start(rev)
861 self.opener(self.datafile, "a").truncate(end)
862 end = rev * struct.calcsize(indexformat)
863 self.opener(self.indexfile, "a").truncate(end)
864
865 # then reset internal state in memory to forget those revisions
866 self.cache = None
867 for p in self.index[rev:]:
868 del self.nodemap[p[6]]
869 del self.index[rev:]
870
871 # truncating the lazyindex also truncates the lazymap.
872 if isinstance(self.index, lazyindex):
873 self.index.trunc(end)
874
875
831 def checksize(self):
876 def checksize(self):
832 expected = 0
877 expected = 0
833 if self.count():
878 if self.count():
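
strip() is new: it walks forward from the requested revision until the changeset links are old enough to keep (minlink), truncates the data and index files on disk at the corresponding offsets, and then drops the stripped entries from the in-memory index and nodemap (and from lazyindex/lazymap via the new trunc() and __delitem__ hooks). A simplified sketch of the on-disk half, truncating a fixed-width index file and its data file (the record layout here is invented for the example, not revlog's real indexformat):

import os, struct

RECORD = ">ll"                          # (data offset, data length) per revision
RECSIZE = struct.calcsize(RECORD)

def strip(indexpath, datapath, rev):
    """Discard revision rev and everything after it from both files."""
    with open(indexpath, "rb") as idx:
        idx.seek(rev * RECSIZE)
        offset, _length = struct.unpack(RECORD, idx.read(RECSIZE))
    # truncate the data file at the start of the first stripped revision,
    # then the index file at the first stripped record
    with open(datapath, "r+b") as df:
        df.truncate(offset)
    with open(indexpath, "r+b") as xf:
        xf.truncate(rev * RECSIZE)

# usage sketch: build two tiny files with three revisions, strip the last two
with open("demo.d", "wb") as df, open("demo.i", "wb") as xf:
    pos = 0
    for payload in (b"rev0", b"revision-1", b"rev2!"):
        xf.write(struct.pack(RECORD, pos, len(payload)))
        df.write(payload)
        pos += len(payload)
strip("demo.i", "demo.d", 1)
print(os.path.getsize("demo.i") // RECSIZE, os.path.getsize("demo.d"))  # 1 4
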
@@ -362,7 +362,36 b' def opener(base):'
362 remote file access from higher level code.
362 remote file access from higher level code.
363 """
363 """
364 p = base
364 p = base
365 def o(path, mode="r", text=False):
365
366 def mktempcopy(name):
367 d, fn = os.path.split(name)
368 fd, temp = tempfile.mkstemp(prefix=fn, dir=d)
369 fp = os.fdopen(fd, "wb")
370 try:
371 fp.write(file(name, "rb").read())
372 except:
373 try: os.unlink(temp)
374 except: pass
375 raise
376 fp.close()
377 st = os.lstat(name)
378 os.chmod(temp, st.st_mode)
379 return temp
380
381 class atomicfile(file):
382 """the file will only be copied on close"""
383 def __init__(self, name, mode, atomic=False):
384 self.__name = name
385 self.temp = mktempcopy(name)
386 file.__init__(self, self.temp, mode)
387 def close(self):
388 if not self.closed:
389 rename(self.temp, self.__name)
390 file.close(self)
391 def __del__(self):
392 self.close()
393
394 def o(path, mode="r", text=False, atomic=False):
366 f = os.path.join(p, path)
395 f = os.path.join(p, path)
367
396
368 if not text:
397 if not text:
@@ -376,21 +405,10 b' def opener(base):'
376 if not os.path.isdir(d):
405 if not os.path.isdir(d):
377 os.makedirs(d)
406 os.makedirs(d)
378 else:
407 else:
408 if atomic:
409 return atomicfile(f, mode)
379 if nlink > 1:
410 if nlink > 1:
380 d, fn = os.path.split(f)
411 rename(mktempcopy(f), f)
381 fd, temp = tempfile.mkstemp(prefix=fn, dir=d)
382 fp = os.fdopen(fd, "wb")
383 try:
384 fp.write(file(f, "rb").read())
385 except:
386 try: os.unlink(temp)
387 except: pass
388 raise
389 fp.close()
390 st = os.lstat(f)
391 os.chmod(temp, st.st_mode)
392 rename(temp, f)
393
394 return file(f, mode)
412 return file(f, mode)
395
413
396 return o
414 return o
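
util.opener() now factors the copy-to-temp logic into mktempcopy() and adds an atomic mode: an atomicfile writes into a temporary file in the same directory and only renames it over the real name on close, which is how dirstate.write() (opened with atomic=True above) keeps readers from ever seeing a half-written file. A self-contained sketch of the temp-plus-rename idea (a simplification: no permission copying or hardlink breaking, and the temporary file starts empty rather than as a copy of the original):

import os, tempfile

class AtomicFile:
    """Write to a temporary file; the real name is only replaced on close."""

    def __init__(self, name, mode="wb"):
        self.name = name
        d, fn = os.path.split(name)
        fd, self.temp = tempfile.mkstemp(prefix=fn, dir=d or ".")
        self.fp = os.fdopen(fd, mode)

    def write(self, data):
        self.fp.write(data)

    def close(self):
        if not self.fp.closed:
            self.fp.close()
            # rename is atomic on POSIX: readers see the old or new file, never a mix
            os.replace(self.temp, self.name)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        if exc[0] is None:
            self.close()
        else:
            self.fp.close()
            os.unlink(self.temp)     # on error, leave the original untouched

# usage sketch
with AtomicFile("demo-state", "wb") as f:
    f.write(b"all or nothing\n")
print(open("demo-state", "rb").read())
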
@@ -1,15 +1,11 b''
1 bar: unsupported file type (type is symbolic link)
2 adding foo
1 adding foo
3 bar: unsupported file type (type is symbolic link)
4 bar: unsupported file type (type is symbolic link)
5 adding bomb
2 adding bomb
6 bar: unsupported file type (type is symbolic link)
7 adding a.c
3 adding a.c
8 adding dir/a.o
4 adding dir/a.o
9 adding dir/b.o
5 adding dir/b.o
10 a.c: unsupported file type (type is fifo)
11 dir/b.o: unsupported file type (type is symbolic link)
12 R a.c
6 R a.c
13 R dir/a.o
7 R dir/a.o
14 R dir/b.o
8 R dir/b.o
15 ? .hgignore
9 ? .hgignore
10 a.c: unsupported file type (type is fifo)
11 R a.c