@@ -1,1064 +1,1066 @@
 """
 revlog.py - storage back-end for mercurial
 
 This provides efficient delta storage with O(1) retrieve and append
 and O(changes) merge between branches
 
 Copyright 2005 Matt Mackall <mpm@selenic.com>
 
 This software may be used and distributed according to the terms
 of the GNU General Public License, incorporated herein by reference.
 """
 
 from node import *
 from i18n import gettext as _
 from demandload import demandload
 demandload(globals(), "binascii changegroup errno heapq mdiff os")
 demandload(globals(), "sha struct zlib")
 
 # revlog version strings
 REVLOGV0 = 0
 REVLOGNG = 1
 
 # revlog flags
 REVLOGNGINLINEDATA = (1 << 16)
 
 def flagstr(flag):
     if flag == "inline":
         return REVLOGNGINLINEDATA
     raise RevlogError(_("unknown revlog flag %s" % flag))
 
 def hash(text, p1, p2):
     """generate a hash from the given text and its parent hashes
 
     This hash combines both the current file contents and its history
     in a manner that makes it easy to distinguish nodes with the same
     content in the revision graph.
     """
     l = [p1, p2]
     l.sort()
     s = sha.new(l[0])
     s.update(l[1])
     s.update(text)
     return s.digest()
 
 def compress(text):
     """ generate a possibly-compressed representation of text """
     if not text: return ("", text)
     if len(text) < 44:
         if text[0] == '\0': return ("", text)
         return ('u', text)
     bin = zlib.compress(text)
     if len(bin) > len(text):
         if text[0] == '\0': return ("", text)
         return ('u', text)
     return ("", bin)
 
 def decompress(bin):
     """ decompress the given input """
     if not bin: return bin
     t = bin[0]
     if t == '\0': return bin
     if t == 'x': return zlib.decompress(bin)
     if t == 'u': return bin[1:]
     raise RevlogError(_("unknown compression type %r") % t)
 
 indexformatv0 = ">4l20s20s20s"
 # index ng:
 # 6 bytes offset
 # 2 bytes flags
 # 4 bytes compressed length
 # 4 bytes uncompressed length
 # 4 bytes: base rev
 # 4 bytes link rev
 # 4 bytes parent 1 rev
 # 4 bytes parent 2 rev
 # 32 bytes: nodeid
 indexformatng = ">Qiiiiii20s12x"
 versionformat = ">i"
 
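As a quick cross-check of the record layouts listed above, both format strings describe fixed-size index entries that struct can size and decode directly. A minimal sketch, assuming it runs at module level next to the format strings (the field names in the comment come from the layout comment above, not from this module):

    import struct

    # v0 entries: offset, size, base, linkrev (4l) plus three 20-byte hashes
    assert struct.calcsize(indexformatv0) == 76
    # revlogng entries: offset/flags, lengths, base, link, parents, node, padding
    assert struct.calcsize(indexformatng) == 64
    # decoding one raw revlogng record yields:
    # (offset_flags, comp_len, uncomp_len, base_rev, link_rev, p1_rev, p2_rev, nodeid)
    entry = struct.unpack(indexformatng, '\0' * 64)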
 class lazyparser(object):
     """
     this class avoids the need to parse the entirety of large indices
 
     By default we parse and load 1000 entries at a time.
 
     If no position is specified, we load the whole index, and replace
     the lazy objects in revlog with the underlying objects for
     efficiency in cases where we look at most of the nodes.
     """
     def __init__(self, data, revlog, indexformat):
         self.data = data
         self.s = struct.calcsize(indexformat)
         self.indexformat = indexformat
         self.l = len(data)/self.s
         self.index = [None] * self.l
         self.map = {nullid: -1}
         self.all = 0
         self.revlog = revlog
 
     def load(self, pos=None):
         if self.all: return
         if pos is not None:
             block = pos / 1000
             i = block * 1000
             end = min(self.l, i + 1000)
         else:
             self.all = 1
             i = 0
             end = self.l
             self.revlog.index = self.index
             self.revlog.nodemap = self.map
 
         while i < end:
             if not self.index[i]:
                 d = self.data[i * self.s: (i + 1) * self.s]
                 e = struct.unpack(self.indexformat, d)
                 self.index[i] = e
                 self.map[e[-1]] = i
             i += 1
 
 class lazyindex(object):
     """a lazy version of the index array"""
     def __init__(self, parser):
         self.p = parser
     def __len__(self):
         return len(self.p.index)
     def load(self, pos):
         if pos < 0:
             pos += len(self.p.index)
         self.p.load(pos)
         return self.p.index[pos]
     def __getitem__(self, pos):
         return self.p.index[pos] or self.load(pos)
     def __setitem__(self, pos, item):
         self.p.index[pos] = item
     def __delitem__(self, pos):
         del self.p.index[pos]
     def append(self, e):
         self.p.index.append(e)
 
 class lazymap(object):
     """a lazy version of the node map"""
     def __init__(self, parser):
         self.p = parser
     def load(self, key):
         if self.p.all: return
         n = self.p.data.find(key)
         if n < 0:
             raise KeyError(key)
         pos = n / self.p.s
         self.p.load(pos)
     def __contains__(self, key):
         self.p.load()
         return key in self.p.map
     def __iter__(self):
         yield nullid
         for i in xrange(self.p.l):
             try:
                 yield self.p.index[i][-1]
             except:
                 self.p.load(i)
                 yield self.p.index[i][-1]
     def __getitem__(self, key):
         try:
             return self.p.map[key]
         except KeyError:
             try:
                 self.load(key)
                 return self.p.map[key]
             except KeyError:
                 raise KeyError("node " + hex(key))
     def __setitem__(self, key, val):
         self.p.map[key] = val
     def __delitem__(self, key):
         del self.p.map[key]
 
 class RevlogError(Exception): pass
 
 class revlog(object):
     """
     the underlying revision storage object
 
     A revlog consists of two parts, an index and the revision data.
 
     The index is a file with a fixed record size containing
     information on each revision, includings its nodeid (hash), the
     nodeids of its parents, the position and offset of its data within
     the data file, and the revision it's based on. Finally, each entry
     contains a linkrev entry that can serve as a pointer to external
     data.
 
     The revision data itself is a linear collection of data chunks.
     Each chunk represents a revision and is usually represented as a
     delta against the previous chunk. To bound lookup time, runs of
     deltas are limited to about 2 times the length of the original
     version data. This makes retrieval of a version proportional to
     its size, or O(1) relative to the number of revisions.
 
     Both pieces of the revlog are written to in an append-only
     fashion, which means we never need to rewrite a file to insert or
     remove data, and can use some simple techniques to avoid the need
     for locking while reading.
     """
     def __init__(self, opener, indexfile, datafile, defversion=0):
         """
         create a revlog object
 
         opener is a function that abstracts the file opening operation
         and can be used to implement COW semantics or the like.
         """
         self.indexfile = indexfile
         self.datafile = datafile
         self.opener = opener
 
         self.indexstat = None
         self.cache = None
         self.chunkcache = None
         self.defversion = defversion
         self.load()
 
     def load(self):
         v = self.defversion
         try:
             f = self.opener(self.indexfile)
             i = f.read()
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
             i = ""
         else:
             try:
                 st = os.fstat(f.fileno())
             except AttributeError, inst:
                 st = None
             else:
                 oldst = self.indexstat
                 if (oldst and st.st_dev == oldst.st_dev
                     and st.st_ino == oldst.st_ino
                     and st.st_mtime == oldst.st_mtime
                     and st.st_ctime == oldst.st_ctime):
                     return
                 self.indexstat = st
         if len(i) > 0:
             v = struct.unpack(versionformat, i[:4])[0]
         flags = v & ~0xFFFF
         fmt = v & 0xFFFF
         if fmt == 0:
             if flags:
                 raise RevlogError(_("index %s invalid flags %x for format v0" %
                                     (self.indexfile, flags)))
         elif fmt == REVLOGNG:
             if flags & ~REVLOGNGINLINEDATA:
                 raise RevlogError(_("index %s invalid flags %x for revlogng" %
                                     (self.indexfile, flags)))
         else:
             raise RevlogError(_("index %s invalid format %d" %
                                 (self.indexfile, fmt)))
         self.version = v
         if v == 0:
             self.indexformat = indexformatv0
         else:
             self.indexformat = indexformatng
 
         if i:
             if not self.inlinedata() and st and st.st_size > 10000:
                 # big index, let's parse it on demand
                 parser = lazyparser(i, self, self.indexformat)
                 self.index = lazyindex(parser)
                 self.nodemap = lazymap(parser)
             else:
                 self.parseindex(i)
                 if self.inlinedata():
                     # we've already got the entire data file read in, save it
                     # in the chunk data
                     self.chunkcache = (0, i)
                 if self.version != 0:
                     e = list(self.index[0])
                     type = self.ngtype(e[0])
                     e[0] = self.offset_type(0, type)
                     self.index[0] = e
         else:
             self.nodemap = { nullid: -1}
             self.index = []
 
 
     def parseindex(self, data):
         s = struct.calcsize(self.indexformat)
         l = len(data)
         self.index = []
         self.nodemap = {nullid: -1}
         inline = self.inlinedata()
         off = 0
         n = 0
         while off < l:
             e = struct.unpack(self.indexformat, data[off:off + s])
             self.index.append(e)
             self.nodemap[e[-1]] = n
             n += 1
             off += s
             if inline:
                 off += e[1]
 
     def ngoffset(self, q):
         if q & 0xFFFF:
             raise RevlogError(_('%s: incompatible revision flag %x') %
                               (self.indexfile, type))
         return long(q >> 16)
 
     def ngtype(self, q):
         return int(q & 0xFFFF)
 
     def offset_type(self, offset, type):
         return long(long(offset) << 16 | type)
 
     def loadindexmap(self):
         """loads both the map and the index from the lazy parser"""
         if isinstance(self.index, lazyindex):
             p = self.index.p
             p.load()
 
     def inlinedata(self): return self.version & REVLOGNGINLINEDATA
     def tip(self): return self.node(len(self.index) - 1)
     def count(self): return len(self.index)
     def node(self, rev):
         return (rev < 0) and nullid or self.index[rev][-1]
     def rev(self, node):
         try:
             return self.nodemap[node]
         except KeyError:
             raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
     def linkrev(self, node): return self.index[self.rev(node)][-4]
     def parents(self, node):
         if node == nullid: return (nullid, nullid)
         r = self.rev(node)
         d = self.index[r][-3:-1]
         if self.version == 0:
             return d
         return [ self.node(x) for x in d ]
     def start(self, rev):
         if rev < 0:
             return -1
         if self.version != 0:
             return self.ngoffset(self.index[rev][0])
         return self.index[rev][0]
     def end(self, rev): return self.start(rev) + self.length(rev)
 
     def length(self, rev):
         if rev < 0:
             return 0
         else:
             return self.index[rev][1]
     def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
 
     def reachable(self, rev, stop=None):
         reachable = {}
         visit = [rev]
         reachable[rev] = 1
         if stop:
             stopn = self.rev(stop)
         else:
             stopn = 0
         while visit:
             n = visit.pop(0)
             if n == stop:
                 continue
             if n == nullid:
                 continue
             for p in self.parents(n):
                 if self.rev(p) < stopn:
                     continue
                 if p not in reachable:
                     reachable[p] = 1
                     visit.append(p)
         return reachable
 
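The offset_type/ngoffset/ngtype helpers above simply multiplex the 6-byte file offset and 2-byte flags from the layout comment into one integer field. A small illustration of that packing (the literal values are made up for the example):

    q = long(5000) << 16 | 0      # offset_type(5000, 0)
    assert q >> 16 == 5000        # ngoffset() recovers the file offset
    assert q & 0xFFFF == 0        # ngtype() recovers the flags/type bits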
     def nodesbetween(self, roots=None, heads=None):
         """Return a tuple containing three elements. Elements 1 and 2 contain
         a final list bases and heads after all the unreachable ones have been
         pruned. Element 0 contains a topologically sorted list of all
 
         nodes that satisfy these constraints:
         1. All nodes must be descended from a node in roots (the nodes on
            roots are considered descended from themselves).
         2. All nodes must also be ancestors of a node in heads (the nodes in
            heads are considered to be their own ancestors).
 
         If roots is unspecified, nullid is assumed as the only root.
         If heads is unspecified, it is taken to be the output of the
         heads method (i.e. a list of all nodes in the repository that
         have no children)."""
         nonodes = ([], [], [])
         if roots is not None:
             roots = list(roots)
             if not roots:
                 return nonodes
             lowestrev = min([self.rev(n) for n in roots])
         else:
             roots = [nullid] # Everybody's a descendent of nullid
             lowestrev = -1
         if (lowestrev == -1) and (heads is None):
             # We want _all_ the nodes!
             return ([self.node(r) for r in xrange(0, self.count())],
                     [nullid], list(self.heads()))
         if heads is None:
             # All nodes are ancestors, so the latest ancestor is the last
             # node.
             highestrev = self.count() - 1
             # Set ancestors to None to signal that every node is an ancestor.
             ancestors = None
             # Set heads to an empty dictionary for later discovery of heads
             heads = {}
         else:
             heads = list(heads)
             if not heads:
                 return nonodes
             ancestors = {}
             # Start at the top and keep marking parents until we're done.
             nodestotag = heads[:]
             # Turn heads into a dictionary so we can remove 'fake' heads.
             # Also, later we will be using it to filter out the heads we can't
             # find from roots.
             heads = dict.fromkeys(heads, 0)
             # Remember where the top was so we can use it as a limit later.
             highestrev = max([self.rev(n) for n in nodestotag])
             while nodestotag:
                 # grab a node to tag
                 n = nodestotag.pop()
                 # Never tag nullid
                 if n == nullid:
                     continue
                 # A node's revision number represents its place in a
                 # topologically sorted list of nodes.
                 r = self.rev(n)
                 if r >= lowestrev:
                     if n not in ancestors:
                         # If we are possibly a descendent of one of the roots
                         # and we haven't already been marked as an ancestor
                         ancestors[n] = 1 # Mark as ancestor
                         # Add non-nullid parents to list of nodes to tag.
                         nodestotag.extend([p for p in self.parents(n) if
                                            p != nullid])
                     elif n in heads: # We've seen it before, is it a fake head?
                         # So it is, real heads should not be the ancestors of
                         # any other heads.
                         heads.pop(n)
             if not ancestors:
                 return nonodes
             # Now that we have our set of ancestors, we want to remove any
             # roots that are not ancestors.
 
             # If one of the roots was nullid, everything is included anyway.
             if lowestrev > -1:
                 # But, since we weren't, let's recompute the lowest rev to not
                 # include roots that aren't ancestors.
 
                 # Filter out roots that aren't ancestors of heads
                 roots = [n for n in roots if n in ancestors]
                 # Recompute the lowest revision
                 if roots:
                     lowestrev = min([self.rev(n) for n in roots])
                 else:
                     # No more roots? Return empty list
                     return nonodes
             else:
                 # We are descending from nullid, and don't need to care about
                 # any other roots.
                 lowestrev = -1
                 roots = [nullid]
         # Transform our roots list into a 'set' (i.e. a dictionary where the
         # values don't matter.
         descendents = dict.fromkeys(roots, 1)
         # Also, keep the original roots so we can filter out roots that aren't
         # 'real' roots (i.e. are descended from other roots).
         roots = descendents.copy()
         # Our topologically sorted list of output nodes.
         orderedout = []
         # Don't start at nullid since we don't want nullid in our output list,
         # and if nullid shows up in descedents, empty parents will look like
         # they're descendents.
         for r in xrange(max(lowestrev, 0), highestrev + 1):
             n = self.node(r)
             isdescendent = False
             if lowestrev == -1: # Everybody is a descendent of nullid
                 isdescendent = True
             elif n in descendents:
                 # n is already a descendent
                 isdescendent = True
                 # This check only needs to be done here because all the roots
                 # will start being marked is descendents before the loop.
                 if n in roots:
                     # If n was a root, check if it's a 'real' root.
                     p = tuple(self.parents(n))
                     # If any of its parents are descendents, it's not a root.
                     if (p[0] in descendents) or (p[1] in descendents):
                         roots.pop(n)
             else:
                 p = tuple(self.parents(n))
                 # A node is a descendent if either of its parents are
                 # descendents. (We seeded the dependents list with the roots
                 # up there, remember?)
                 if (p[0] in descendents) or (p[1] in descendents):
                     descendents[n] = 1
                     isdescendent = True
             if isdescendent and ((ancestors is None) or (n in ancestors)):
                 # Only include nodes that are both descendents and ancestors.
                 orderedout.append(n)
                 if (ancestors is not None) and (n in heads):
                     # We're trying to figure out which heads are reachable
                     # from roots.
                     # Mark this head as having been reached
                     heads[n] = 1
                 elif ancestors is None:
                     # Otherwise, we're trying to discover the heads.
                     # Assume this is a head because if it isn't, the next step
                     # will eventually remove it.
                     heads[n] = 1
                     # But, obviously its parents aren't.
                     for p in self.parents(n):
                         heads.pop(p, None)
         heads = [n for n in heads.iterkeys() if heads[n] != 0]
         roots = roots.keys()
         assert orderedout
         assert roots
         assert heads
         return (orderedout, roots, heads)
 
     def heads(self, start=None):
         """return the list of all nodes that have no children
 
         if start is specified, only heads that are descendants of
         start will be returned
 
         """
         if start is None:
             start = nullid
         reachable = {start: 1}
         heads = {start: 1}
         startrev = self.rev(start)
 
         for r in xrange(startrev + 1, self.count()):
             n = self.node(r)
             for pn in self.parents(n):
                 if pn in reachable:
                     reachable[n] = 1
                     heads[n] = 1
                 if pn in heads:
                     del heads[pn]
         return heads.keys()
 
     def children(self, node):
         """find the children of a given node"""
         c = []
         p = self.rev(node)
         for r in range(p + 1, self.count()):
             n = self.node(r)
             for pn in self.parents(n):
                 if pn == node:
                     c.append(n)
                     continue
                 elif pn == nullid:
                     continue
         return c
 
     def lookup(self, id):
         """locate a node based on revision number or subset of hex nodeid"""
         try:
             rev = int(id)
             if str(rev) != id: raise ValueError
             if rev < 0: rev = self.count() + rev
             if rev < 0 or rev >= self.count(): raise ValueError
             return self.node(rev)
         except (ValueError, OverflowError):
             c = []
             for n in self.nodemap:
                 if hex(n).startswith(id):
                     c.append(n)
             if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
             if len(c) < 1: raise RevlogError(_("No match found"))
             return c[0]
 
         return None
 
     def diff(self, a, b):
         """return a delta between two revisions"""
         return mdiff.textdiff(a, b)
 
     def patches(self, t, pl):
         """apply a list of patches to a string"""
         return mdiff.patches(t, pl)
 
     def chunk(self, rev, df=None, cachelen=4096):
         start, length = self.start(rev), self.length(rev)
         inline = self.inlinedata()
         if inline:
             start += (rev + 1) * struct.calcsize(self.indexformat)
         end = start + length
         def loadcache(df):
             cache_length = max(cachelen, length) # 4k
             if not df:
                 if inline:
                     df = self.opener(self.indexfile)
                 else:
                     df = self.opener(self.datafile)
             df.seek(start)
             self.chunkcache = (start, df.read(cache_length))
 
         if not self.chunkcache:
             loadcache(df)
 
         cache_start = self.chunkcache[0]
         cache_end = cache_start + len(self.chunkcache[1])
         if start >= cache_start and end <= cache_end:
             # it is cached
             offset = start - cache_start
         else:
             loadcache(df)
             offset = 0
 
         #def checkchunk():
         #    df = self.opener(self.datafile)
         #    df.seek(start)
         #    return df.read(length)
         #assert s == checkchunk()
         return decompress(self.chunkcache[1][offset:offset + length])
 
     def delta(self, node):
         """return or calculate a delta between a node and its predecessor"""
         r = self.rev(node)
         return self.revdiff(r - 1, r)
 
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
         b1 = self.base(rev1)
         b2 = self.base(rev2)
         if b1 == b2 and rev1 + 1 == rev2:
             return self.chunk(rev2)
         else:
             return self.diff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))
 
     def revision(self, node):
         """return an uncompressed revision of a given"""
         if node == nullid: return ""
         if self.cache and self.cache[0] == node: return self.cache[2]
 
         # look up what we need to read
         text = None
         rev = self.rev(node)
         base = self.base(rev)
 
         if self.inlinedata():
             # we probably have the whole chunk cached
             df = None
         else:
             df = self.opener(self.datafile)
 
         # do we have useful data cached?
         if self.cache and self.cache[1] >= base and self.cache[1] < rev:
             base = self.cache[1]
             text = self.cache[2]
         else:
             text = self.chunk(base, df=df)
 
         bins = []
         for r in xrange(base + 1, rev + 1):
             bins.append(self.chunk(r, df=df))
 
         text = self.patches(text, bins)
 
         p1, p2 = self.parents(node)
         if node != hash(text, p1, p2):
             raise RevlogError(_("integrity check failed on %s:%d")
                               % (self.datafile, rev))
 
         self.cache = (node, rev, text)
         return text
 
     def checkinlinesize(self, tr, fp=None):
         if not self.inlinedata():
             return
         if not fp:
             fp = self.opener(self.indexfile, 'r')
         size = fp.tell()
         if size < 131072:
             return
         tr.add(self.datafile, 0)
         df = self.opener(self.datafile, 'w')
         calc = struct.calcsize(self.indexformat)
         for r in xrange(self.count()):
             start = self.start(r) + (r + 1) * calc
             length = self.length(r)
             fp.seek(start)
             d = fp.read(length)
             df.write(d)
         fp.close()
         df.close()
-        fp = self.opener(self.indexfile, 'w', atomic=True)
+        fp = self.opener(self.indexfile, 'w', atomictemp=True)
         self.version &= ~(REVLOGNGINLINEDATA)
         if self.count():
             x = self.index[0]
             e = struct.pack(self.indexformat, *x)[4:]
             l = struct.pack(versionformat, self.version)
             fp.write(l)
             fp.write(e)
 
         for i in xrange(1, self.count()):
             x = self.index[i]
             e = struct.pack(self.indexformat, *x)
             fp.write(e)
 
-        fp.close()
+        # if we don't call rename, the temp file will never replace the
+        # real index
+        fp.rename()
         self.chunkcache = None
 
     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         """add a revision to the log
 
         text - the revision data to add
         transaction - the transaction object used for rollback
         link - the linkrev data to add
         p1, p2 - the parent nodeids of the revision
         d - an optional precomputed delta
         """
         if text is None: text = ""
         if p1 is None: p1 = self.tip()
         if p2 is None: p2 = nullid
 
         node = hash(text, p1, p2)
 
         if node in self.nodemap:
             return node
 
         n = self.count()
         t = n - 1
 
         if n:
             base = self.base(t)
             start = self.start(base)
             end = self.end(t)
             if not d:
                 prev = self.revision(self.tip())
                 d = self.diff(prev, str(text))
             data = compress(d)
             l = len(data[1]) + len(data[0])
             dist = end - start + l
 
         # full versions are inserted when the needed deltas
         # become comparable to the uncompressed text
         if not n or dist > len(text) * 2:
             data = compress(text)
             l = len(data[1]) + len(data[0])
             base = n
         else:
             base = self.base(t)
 
         offset = 0
         if t >= 0:
             offset = self.end(t)
 
         if self.version == 0:
             e = (offset, l, base, link, p1, p2, node)
         else:
             e = (self.offset_type(offset, 0), l, len(text),
                  base, link, self.rev(p1), self.rev(p2), node)
 
         self.index.append(e)
         self.nodemap[node] = n
         entry = struct.pack(self.indexformat, *e)
 
         if not self.inlinedata():
             transaction.add(self.datafile, offset)
             transaction.add(self.indexfile, n * len(entry))
             f = self.opener(self.datafile, "a")
             if data[0]:
                 f.write(data[0])
             f.write(data[1])
             f = self.opener(self.indexfile, "a")
         else:
             f = self.opener(self.indexfile, "a+")
             transaction.add(self.indexfile, f.tell())
 
         if len(self.index) == 1 and self.version != 0:
             l = struct.pack(versionformat, self.version)
             f.write(l)
             entry = entry[4:]
 
         f.write(entry)
 
         if self.inlinedata():
             f.write(data[0])
             f.write(data[1])
             self.checkinlinesize(transaction, f)
 
         self.cache = (node, n, text)
         return node
 
796 | def ancestor(self, a, b): |
|
798 | def ancestor(self, a, b): | |
797 | """calculate the least common ancestor of nodes a and b""" |
|
799 | """calculate the least common ancestor of nodes a and b""" | |
798 | # calculate the distance of every node from root |
|
800 | # calculate the distance of every node from root | |
799 | dist = {nullid: 0} |
|
801 | dist = {nullid: 0} | |
800 | for i in xrange(self.count()): |
|
802 | for i in xrange(self.count()): | |
801 | n = self.node(i) |
|
803 | n = self.node(i) | |
802 | p1, p2 = self.parents(n) |
|
804 | p1, p2 = self.parents(n) | |
803 | dist[n] = max(dist[p1], dist[p2]) + 1 |
|
805 | dist[n] = max(dist[p1], dist[p2]) + 1 | |
804 |
|
806 | |||
805 | # traverse ancestors in order of decreasing distance from root |
|
807 | # traverse ancestors in order of decreasing distance from root | |
806 | def ancestors(node): |
|
808 | def ancestors(node): | |
807 | # we store negative distances because heap returns smallest member |
|
809 | # we store negative distances because heap returns smallest member | |
808 | h = [(-dist[node], node)] |
|
810 | h = [(-dist[node], node)] | |
809 | seen = {} |
|
811 | seen = {} | |
810 | while h: |
|
812 | while h: | |
811 | d, n = heapq.heappop(h) |
|
813 | d, n = heapq.heappop(h) | |
812 | if n not in seen: |
|
814 | if n not in seen: | |
813 | seen[n] = 1 |
|
815 | seen[n] = 1 | |
814 | yield (-d, n) |
|
816 | yield (-d, n) | |
815 | for p in self.parents(n): |
|
817 | for p in self.parents(n): | |
816 | heapq.heappush(h, (-dist[p], p)) |
|
818 | heapq.heappush(h, (-dist[p], p)) | |
817 |
|
819 | |||
818 | def generations(node): |
|
820 | def generations(node): | |
819 | sg, s = None, {} |
|
821 | sg, s = None, {} | |
820 | for g,n in ancestors(node): |
|
822 | for g,n in ancestors(node): | |
821 | if g != sg: |
|
823 | if g != sg: | |
822 | if sg: |
|
824 | if sg: | |
823 | yield sg, s |
|
825 | yield sg, s | |
824 | sg, s = g, {n:1} |
|
826 | sg, s = g, {n:1} | |
825 | else: |
|
827 | else: | |
826 | s[n] = 1 |
|
828 | s[n] = 1 | |
827 | yield sg, s |
|
829 | yield sg, s | |
828 |
|
830 | |||
829 | x = generations(a) |
|
831 | x = generations(a) | |
830 | y = generations(b) |
|
832 | y = generations(b) | |
831 | gx = x.next() |
|
833 | gx = x.next() | |
832 | gy = y.next() |
|
834 | gy = y.next() | |
833 |
|
835 | |||
834 | # increment each ancestor list until it is closer to root than |
|
836 | # increment each ancestor list until it is closer to root than | |
835 | # the other, or they match |
|
837 | # the other, or they match | |
836 | while 1: |
|
838 | while 1: | |
837 | #print "ancestor gen %s %s" % (gx[0], gy[0]) |
|
839 | #print "ancestor gen %s %s" % (gx[0], gy[0]) | |
838 | if gx[0] == gy[0]: |
|
840 | if gx[0] == gy[0]: | |
839 | # find the intersection |
|
841 | # find the intersection | |
840 | i = [ n for n in gx[1] if n in gy[1] ] |
|
842 | i = [ n for n in gx[1] if n in gy[1] ] | |
841 | if i: |
|
843 | if i: | |
842 | return i[0] |
|
844 | return i[0] | |
843 | else: |
|
845 | else: | |
844 | #print "next" |
|
846 | #print "next" | |
845 | gy = y.next() |
|
847 | gy = y.next() | |
846 | gx = x.next() |
|
848 | gx = x.next() | |
847 | elif gx[0] < gy[0]: |
|
849 | elif gx[0] < gy[0]: | |
848 | #print "next y" |
|
850 | #print "next y" | |
849 | gy = y.next() |
|
851 | gy = y.next() | |
850 | else: |
|
852 | else: | |
851 | #print "next x" |
|
853 | #print "next x" | |
852 | gx = x.next() |
|
854 | gx = x.next() | |
853 |
|
855 | |||
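The ancestor() method just above finds the least common ancestor by first recording every node's distance from the root and then walking both ancestor sets from the deepest generation upward until they intersect. The following simplified sketch illustrates the same idea on an invented toy parent map; it simply picks the deepest common ancestor instead of advancing the two generation streams in lockstep, so it is an illustration of the principle rather than a drop-in replacement:

    import heapq

    # toy DAG, names invented for illustration; "null" is the root
    parents = {
        "a": ("null", "null"),
        "b": ("a", "null"),
        "c": ("a", "null"),
        "d": ("b", "c"),
        "e": ("c", "null"),
    }

    # distance of every node from the root, as in ancestor() above
    dist = {"null": 0}
    for n in ("a", "b", "c", "d", "e"):            # topological order
        p1, p2 = parents[n]
        dist[n] = max(dist[p1], dist[p2]) + 1

    def ancestors(node):
        # negative distances because the heap pops its smallest member first
        h = [(-dist[node], node)]
        seen = {}
        while h:
            d, n = heapq.heappop(h)
            if n not in seen:
                seen[n] = 1
                yield n
                if n != "null":
                    for p in parents[n]:
                        heapq.heappush(h, (-dist[p], p))

    def toy_lca(a, b):
        # deepest node reachable from both a and b
        seen_a = dict.fromkeys(ancestors(a))
        best = None
        for n in ancestors(b):
            if n in seen_a and (best is None or dist[n] > dist[best]):
                best = n
        return best

    print toy_lca("d", "e")                        # -> c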
854 | def group(self, nodelist, lookup, infocollect=None): |
|
856 | def group(self, nodelist, lookup, infocollect=None): | |
855 | """calculate a delta group |
|
857 | """calculate a delta group | |
856 |
|
858 | |||
857 | Given a list of changeset revs, return a set of deltas and |
|
859 | Given a list of changeset revs, return a set of deltas and | |
858 | metadata corresponding to nodes. the first delta is |
|
860 | metadata corresponding to nodes. the first delta is | |
859 | parent(nodes[0]) -> nodes[0] the receiver is guaranteed to |
|
861 | parent(nodes[0]) -> nodes[0] the receiver is guaranteed to | |
860 | have this parent as it has all history before these |
|
862 | have this parent as it has all history before these | |
861 | changesets. parent is parent[0] |
|
863 | changesets. parent is parent[0] | |
862 | """ |
|
864 | """ | |
863 | revs = [self.rev(n) for n in nodelist] |
|
865 | revs = [self.rev(n) for n in nodelist] | |
864 |
|
866 | |||
865 | # if we don't have any revisions touched by these changesets, bail |
|
867 | # if we don't have any revisions touched by these changesets, bail | |
866 | if not revs: |
|
868 | if not revs: | |
867 | yield changegroup.closechunk() |
|
869 | yield changegroup.closechunk() | |
868 | return |
|
870 | return | |
869 |
|
871 | |||
870 | # add the parent of the first rev |
|
872 | # add the parent of the first rev | |
871 | p = self.parents(self.node(revs[0]))[0] |
|
873 | p = self.parents(self.node(revs[0]))[0] | |
872 | revs.insert(0, self.rev(p)) |
|
874 | revs.insert(0, self.rev(p)) | |
873 |
|
875 | |||
874 | # build deltas |
|
876 | # build deltas | |
875 | for d in xrange(0, len(revs) - 1): |
|
877 | for d in xrange(0, len(revs) - 1): | |
876 | a, b = revs[d], revs[d + 1] |
|
878 | a, b = revs[d], revs[d + 1] | |
877 | nb = self.node(b) |
|
879 | nb = self.node(b) | |
878 |
|
880 | |||
879 | if infocollect is not None: |
|
881 | if infocollect is not None: | |
880 | infocollect(nb) |
|
882 | infocollect(nb) | |
881 |
|
883 | |||
882 | d = self.revdiff(a, b) |
|
884 | d = self.revdiff(a, b) | |
883 | p = self.parents(nb) |
|
885 | p = self.parents(nb) | |
884 | meta = nb + p[0] + p[1] + lookup(nb) |
|
886 | meta = nb + p[0] + p[1] + lookup(nb) | |
885 | yield changegroup.genchunk("%s%s" % (meta, d)) |
|
887 | yield changegroup.genchunk("%s%s" % (meta, d)) | |
886 |
|
888 | |||
887 | yield changegroup.closechunk() |
|
889 | yield changegroup.closechunk() | |
888 |
|
890 | |||
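Each chunk yielded by group() above consists of an 80-byte header (the node, its two parents, and the changeset node obtained from lookup()) followed immediately by the delta; addgroup() below recovers the pieces with the same fixed-width unpack. A minimal round-trip sketch using invented placeholder ids:

    import struct

    node, p1, p2, cs = "N" * 20, "P" * 20, "Q" * 20, "C" * 20  # fake 20-byte ids
    delta = "example delta payload"

    # sender side, as in group(): meta = node + p1 + p2 + linknode, then the delta
    chunk = "%s%s%s%s%s" % (node, p1, p2, cs, delta)

    # receiver side, as in addgroup()
    rnode, rp1, rp2, rcs = struct.unpack("20s20s20s20s", chunk[:80])
    rdelta = chunk[80:]
    assert (rnode, rp1, rp2, rcs, rdelta) == (node, p1, p2, cs, delta)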
889 | def addgroup(self, revs, linkmapper, transaction, unique=0): |
|
891 | def addgroup(self, revs, linkmapper, transaction, unique=0): | |
890 | """ |
|
892 | """ | |
891 | add a delta group |
|
893 | add a delta group | |
892 |
|
894 | |||
893 | given a set of deltas, add them to the revision log. the |
|
895 | given a set of deltas, add them to the revision log. the | |
894 | first delta is against its parent, which should be in our |
|
896 | first delta is against its parent, which should be in our | |
895 | log, the rest are against the previous delta. |
|
897 | log, the rest are against the previous delta. | |
896 | """ |
|
898 | """ | |
897 |
|
899 | |||
898 | #track the base of the current delta log |
|
900 | #track the base of the current delta log | |
899 | r = self.count() |
|
901 | r = self.count() | |
900 | t = r - 1 |
|
902 | t = r - 1 | |
901 | node = None |
|
903 | node = None | |
902 |
|
904 | |||
903 | base = prev = -1 |
|
905 | base = prev = -1 | |
904 | start = end = measure = 0 |
|
906 | start = end = measure = 0 | |
905 | if r: |
|
907 | if r: | |
906 | end = self.end(t) |
|
908 | end = self.end(t) | |
907 |
|
909 | |||
908 | ifh = self.opener(self.indexfile, "a+") |
|
910 | ifh = self.opener(self.indexfile, "a+") | |
909 | transaction.add(self.indexfile, ifh.tell()) |
|
911 | transaction.add(self.indexfile, ifh.tell()) | |
910 | if self.inlinedata(): |
|
912 | if self.inlinedata(): | |
911 | dfh = None |
|
913 | dfh = None | |
912 | else: |
|
914 | else: | |
913 | transaction.add(self.datafile, end) |
|
915 | transaction.add(self.datafile, end) | |
914 | dfh = self.opener(self.datafile, "a") |
|
916 | dfh = self.opener(self.datafile, "a") | |
915 |
|
917 | |||
916 | # loop through our set of deltas |
|
918 | # loop through our set of deltas | |
917 | chain = None |
|
919 | chain = None | |
918 | for chunk in revs: |
|
920 | for chunk in revs: | |
919 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) |
|
921 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) | |
920 | link = linkmapper(cs) |
|
922 | link = linkmapper(cs) | |
921 | if node in self.nodemap: |
|
923 | if node in self.nodemap: | |
922 | # this can happen if two branches make the same change |
|
924 | # this can happen if two branches make the same change | |
923 | # if unique: |
|
925 | # if unique: | |
924 | # raise RevlogError(_("already have %s") % hex(node[:4])) |
|
926 | # raise RevlogError(_("already have %s") % hex(node[:4])) | |
925 | chain = node |
|
927 | chain = node | |
926 | continue |
|
928 | continue | |
927 | delta = chunk[80:] |
|
929 | delta = chunk[80:] | |
928 |
|
930 | |||
929 | for p in (p1, p2): |
|
931 | for p in (p1, p2): | |
930 | if not p in self.nodemap: |
|
932 | if not p in self.nodemap: | |
931 | raise RevlogError(_("unknown parent %s") % short(p1)) |
|
933 | raise RevlogError(_("unknown parent %s") % short(p1)) | |
932 |
|
934 | |||
933 | if not chain: |
|
935 | if not chain: | |
934 | # retrieve the parent revision of the delta chain |
|
936 | # retrieve the parent revision of the delta chain | |
935 | chain = p1 |
|
937 | chain = p1 | |
936 | if not chain in self.nodemap: |
|
938 | if not chain in self.nodemap: | |
937 | raise RevlogError(_("unknown base %s") % short(chain[:4])) |
|
939 | raise RevlogError(_("unknown base %s") % short(chain[:4])) | |
938 |
|
940 | |||
939 | # full versions are inserted when the needed deltas become |
|
941 | # full versions are inserted when the needed deltas become | |
940 | # comparable to the uncompressed text or when the previous |
|
942 | # comparable to the uncompressed text or when the previous | |
941 | # version is not the one we have a delta against. We use |
|
943 | # version is not the one we have a delta against. We use | |
942 | # the size of the previous full rev as a proxy for the |
|
944 | # the size of the previous full rev as a proxy for the | |
943 | # current size. |
|
945 | # current size. | |
944 |
|
946 | |||
945 | if chain == prev: |
|
947 | if chain == prev: | |
946 | tempd = compress(delta) |
|
948 | tempd = compress(delta) | |
947 | cdelta = tempd[0] + tempd[1] |
|
949 | cdelta = tempd[0] + tempd[1] | |
948 |
|
950 | |||
949 | if chain != prev or (end - start + len(cdelta)) > measure * 2: |
|
951 | if chain != prev or (end - start + len(cdelta)) > measure * 2: | |
950 | # flush our writes here so we can read it in revision |
|
952 | # flush our writes here so we can read it in revision | |
951 | if dfh: |
|
953 | if dfh: | |
952 | dfh.flush() |
|
954 | dfh.flush() | |
953 | ifh.flush() |
|
955 | ifh.flush() | |
954 | text = self.revision(chain) |
|
956 | text = self.revision(chain) | |
955 | text = self.patches(text, [delta]) |
|
957 | text = self.patches(text, [delta]) | |
956 | chk = self.addrevision(text, transaction, link, p1, p2) |
|
958 | chk = self.addrevision(text, transaction, link, p1, p2) | |
957 | if chk != node: |
|
959 | if chk != node: | |
958 | raise RevlogError(_("consistency error adding group")) |
|
960 | raise RevlogError(_("consistency error adding group")) | |
959 | measure = len(text) |
|
961 | measure = len(text) | |
960 | else: |
|
962 | else: | |
961 | if self.version == 0: |
|
963 | if self.version == 0: | |
962 | e = (end, len(cdelta), base, link, p1, p2, node) |
|
964 | e = (end, len(cdelta), base, link, p1, p2, node) | |
963 | else: |
|
965 | else: | |
964 | e = (self.offset_type(end, 0), len(cdelta), -1, base, |
|
966 | e = (self.offset_type(end, 0), len(cdelta), -1, base, | |
965 | link, self.rev(p1), self.rev(p2), node) |
|
967 | link, self.rev(p1), self.rev(p2), node) | |
966 | self.index.append(e) |
|
968 | self.index.append(e) | |
967 | self.nodemap[node] = r |
|
969 | self.nodemap[node] = r | |
968 | if self.inlinedata(): |
|
970 | if self.inlinedata(): | |
969 | ifh.write(struct.pack(self.indexformat, *e)) |
|
971 | ifh.write(struct.pack(self.indexformat, *e)) | |
970 | ifh.write(cdelta) |
|
972 | ifh.write(cdelta) | |
971 | self.checkinlinesize(transaction, ifh) |
|
973 | self.checkinlinesize(transaction, ifh) | |
972 | if not self.inlinedata(): |
|
974 | if not self.inlinedata(): | |
973 | dfh = self.opener(self.datafile, "a") |
|
975 | dfh = self.opener(self.datafile, "a") | |
974 | ifh = self.opener(self.indexfile, "a") |
|
976 | ifh = self.opener(self.indexfile, "a") | |
975 | else: |
|
977 | else: | |
976 | if not dfh: |
|
978 | if not dfh: | |
977 | # addrevision switched from inline to conventional |
|
979 | # addrevision switched from inline to conventional | |
978 | # reopen the index |
|
980 | # reopen the index | |
979 | dfh = self.opener(self.datafile, "a") |
|
981 | dfh = self.opener(self.datafile, "a") | |
980 | ifh = self.opener(self.indexfile, "a") |
|
982 | ifh = self.opener(self.indexfile, "a") | |
981 | dfh.write(cdelta) |
|
983 | dfh.write(cdelta) | |
982 | ifh.write(struct.pack(self.indexformat, *e)) |
|
984 | ifh.write(struct.pack(self.indexformat, *e)) | |
983 |
|
985 | |||
984 | t, r, chain, prev = r, r + 1, node, node |
|
986 | t, r, chain, prev = r, r + 1, node, node | |
985 | base = self.base(t) |
|
987 | base = self.base(t) | |
986 | start = self.start(base) |
|
988 | start = self.start(base) | |
987 | end = self.end(t) |
|
989 | end = self.end(t) | |
988 |
|
990 | |||
989 | if node is None: |
|
991 | if node is None: | |
990 | raise RevlogError(_("group to be added is empty")) |
|
992 | raise RevlogError(_("group to be added is empty")) | |
991 | return node |
|
993 | return node | |
992 |
|
994 | |||
993 | def strip(self, rev, minlink): |
|
995 | def strip(self, rev, minlink): | |
994 | if self.count() == 0 or rev >= self.count(): |
|
996 | if self.count() == 0 or rev >= self.count(): | |
995 | return |
|
997 | return | |
996 |
|
998 | |||
997 | if isinstance(self.index, lazyindex): |
|
999 | if isinstance(self.index, lazyindex): | |
998 | self.loadindexmap() |
|
1000 | self.loadindexmap() | |
999 |
|
1001 | |||
1000 | # When stripping away a revision, we need to make sure it |
|
1002 | # When stripping away a revision, we need to make sure it | |
1001 | # does not actually belong to an older changeset. |
|
1003 | # does not actually belong to an older changeset. | |
1002 | # The minlink parameter defines the oldest revision |
|
1004 | # The minlink parameter defines the oldest revision | |
1003 | # we're allowed to strip away. |
|
1005 | # we're allowed to strip away. | |
1004 | while minlink > self.index[rev][-4]: |
|
1006 | while minlink > self.index[rev][-4]: | |
1005 | rev += 1 |
|
1007 | rev += 1 | |
1006 | if rev >= self.count(): |
|
1008 | if rev >= self.count(): | |
1007 | return |
|
1009 | return | |
1008 |
|
1010 | |||
1009 | # first truncate the files on disk |
|
1011 | # first truncate the files on disk | |
1010 | end = self.start(rev) |
|
1012 | end = self.start(rev) | |
1011 | if not self.inlinedata(): |
|
1013 | if not self.inlinedata(): | |
1012 | df = self.opener(self.datafile, "a") |
|
1014 | df = self.opener(self.datafile, "a") | |
1013 | df.truncate(end) |
|
1015 | df.truncate(end) | |
1014 | end = rev * struct.calcsize(self.indexformat) |
|
1016 | end = rev * struct.calcsize(self.indexformat) | |
1015 | else: |
|
1017 | else: | |
1016 | end += rev * struct.calcsize(self.indexformat) |
|
1018 | end += rev * struct.calcsize(self.indexformat) | |
1017 |
|
1019 | |||
1018 | indexf = self.opener(self.indexfile, "a") |
|
1020 | indexf = self.opener(self.indexfile, "a") | |
1019 | indexf.truncate(end) |
|
1021 | indexf.truncate(end) | |
1020 |
|
1022 | |||
1021 | # then reset internal state in memory to forget those revisions |
|
1023 | # then reset internal state in memory to forget those revisions | |
1022 | self.cache = None |
|
1024 | self.cache = None | |
1023 | self.chunkcache = None |
|
1025 | self.chunkcache = None | |
1024 | for x in xrange(rev, self.count()): |
|
1026 | for x in xrange(rev, self.count()): | |
1025 | del self.nodemap[self.node(x)] |
|
1027 | del self.nodemap[self.node(x)] | |
1026 |
|
1028 | |||
1027 | del self.index[rev:] |
|
1029 | del self.index[rev:] | |
1028 |
|
1030 | |||
1029 | def checksize(self): |
|
1031 | def checksize(self): | |
1030 | expected = 0 |
|
1032 | expected = 0 | |
1031 | if self.count(): |
|
1033 | if self.count(): | |
1032 | expected = self.end(self.count() - 1) |
|
1034 | expected = self.end(self.count() - 1) | |
1033 |
|
1035 | |||
1034 | try: |
|
1036 | try: | |
1035 | f = self.opener(self.datafile) |
|
1037 | f = self.opener(self.datafile) | |
1036 | f.seek(0, 2) |
|
1038 | f.seek(0, 2) | |
1037 | actual = f.tell() |
|
1039 | actual = f.tell() | |
1038 | dd = actual - expected |
|
1040 | dd = actual - expected | |
1039 | except IOError, inst: |
|
1041 | except IOError, inst: | |
1040 | if inst.errno != errno.ENOENT: |
|
1042 | if inst.errno != errno.ENOENT: | |
1041 | raise |
|
1043 | raise | |
1042 | dd = 0 |
|
1044 | dd = 0 | |
1043 |
|
1045 | |||
1044 | try: |
|
1046 | try: | |
1045 | f = self.opener(self.indexfile) |
|
1047 | f = self.opener(self.indexfile) | |
1046 | f.seek(0, 2) |
|
1048 | f.seek(0, 2) | |
1047 | actual = f.tell() |
|
1049 | actual = f.tell() | |
1048 | s = struct.calcsize(self.indexformat) |
|
1050 | s = struct.calcsize(self.indexformat) | |
1049 | i = actual / s |
|
1051 | i = actual / s | |
1050 | di = actual - (i * s) |
|
1052 | di = actual - (i * s) | |
1051 | if self.inlinedata(): |
|
1053 | if self.inlinedata(): | |
1052 | databytes = 0 |
|
1054 | databytes = 0 | |
1053 | for r in xrange(self.count()): |
|
1055 | for r in xrange(self.count()): | |
1054 | databytes += self.length(r) |
|
1056 | databytes += self.length(r) | |
1055 | dd = 0 |
|
1057 | dd = 0 | |
1056 | di = actual - self.count() * s - databytes |
|
1058 | di = actual - self.count() * s - databytes | |
1057 | except IOError, inst: |
|
1059 | except IOError, inst: | |
1058 | if inst.errno != errno.ENOENT: |
|
1060 | if inst.errno != errno.ENOENT: | |
1059 | raise |
|
1061 | raise | |
1060 | di = 0 |
|
1062 | di = 0 | |
1061 |
|
1063 | |||
1062 | return (dd, di) |
|
1064 | return (dd, di) | |
1063 |
|
1065 | |||
1064 |
|
1066 |
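The comment block at the top of this hunk, and the matching size checks in addrevision() and addgroup() earlier in the file, describe the delta-chain cutoff: a revision is stored as a delta only while the cost of replaying the whole chain stays under roughly twice the size of the uncompressed text, after which a new full version is written. A minimal standalone sketch of that rule (the function name and figures are invented for illustration; the real code also accounts for compression and for the chain base):

    def should_delta(chainsize, deltasize, textsize):
        # mirrors 'dist > len(text) * 2' in addrevision and
        # '(end - start + len(cdelta)) > measure * 2' in addgroup
        return chainsize + deltasize <= textsize * 2

    print should_delta(1000, 100, 4000)   # True: keep extending the chain
    print should_delta(7900, 200, 4000)   # False: start a new full revision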
@@ -1,817 +1,832 b'' | |||||
1 | """ |
|
1 | """ | |
2 | util.py - Mercurial utility functions and platform specific implementations |
|
2 | util.py - Mercurial utility functions and platform specific implementations | |
3 |
|
3 | |||
4 | Copyright 2005 K. Thananchayan <thananck@yahoo.com> |
|
4 | Copyright 2005 K. Thananchayan <thananck@yahoo.com> | |
5 |
|
5 | |||
6 | This software may be used and distributed according to the terms |
|
6 | This software may be used and distributed according to the terms | |
7 | of the GNU General Public License, incorporated herein by reference. |
|
7 | of the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 | This contains helper routines that are independent of the SCM core and hide |
|
9 | This contains helper routines that are independent of the SCM core and hide | |
10 | platform-specific details from the core. |
|
10 | platform-specific details from the core. | |
11 | """ |
|
11 | """ | |
12 |
|
12 | |||
13 | import os, errno |
|
13 | import os, errno | |
14 | from i18n import gettext as _ |
|
14 | from i18n import gettext as _ | |
15 | from demandload import * |
|
15 | from demandload import * | |
16 | demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile") |
|
16 | demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile") | |
17 | demandload(globals(), "threading time") |
|
17 | demandload(globals(), "threading time") | |
18 |
|
18 | |||
19 | def pipefilter(s, cmd): |
|
19 | def pipefilter(s, cmd): | |
20 | '''filter string S through command CMD, returning its output''' |
|
20 | '''filter string S through command CMD, returning its output''' | |
21 | (pout, pin) = popen2.popen2(cmd, -1, 'b') |
|
21 | (pout, pin) = popen2.popen2(cmd, -1, 'b') | |
22 | def writer(): |
|
22 | def writer(): | |
23 | pin.write(s) |
|
23 | pin.write(s) | |
24 | pin.close() |
|
24 | pin.close() | |
25 |
|
25 | |||
26 | # we should use select instead on UNIX, but this will work on most |
|
26 | # we should use select instead on UNIX, but this will work on most | |
27 | # systems, including Windows |
|
27 | # systems, including Windows | |
28 | w = threading.Thread(target=writer) |
|
28 | w = threading.Thread(target=writer) | |
29 | w.start() |
|
29 | w.start() | |
30 | f = pout.read() |
|
30 | f = pout.read() | |
31 | pout.close() |
|
31 | pout.close() | |
32 | w.join() |
|
32 | w.join() | |
33 | return f |
|
33 | return f | |
34 |
|
34 | |||
35 | def tempfilter(s, cmd): |
|
35 | def tempfilter(s, cmd): | |
36 | '''filter string S through a pair of temporary files with CMD. |
|
36 | '''filter string S through a pair of temporary files with CMD. | |
37 | CMD is used as a template to create the real command to be run, |
|
37 | CMD is used as a template to create the real command to be run, | |
38 | with the strings INFILE and OUTFILE replaced by the real names of |
|
38 | with the strings INFILE and OUTFILE replaced by the real names of | |
39 | the temporary files generated.''' |
|
39 | the temporary files generated.''' | |
40 | inname, outname = None, None |
|
40 | inname, outname = None, None | |
41 | try: |
|
41 | try: | |
42 | infd, inname = tempfile.mkstemp(prefix='hgfin') |
|
42 | infd, inname = tempfile.mkstemp(prefix='hgfin') | |
43 | fp = os.fdopen(infd, 'wb') |
|
43 | fp = os.fdopen(infd, 'wb') | |
44 | fp.write(s) |
|
44 | fp.write(s) | |
45 | fp.close() |
|
45 | fp.close() | |
46 | outfd, outname = tempfile.mkstemp(prefix='hgfout') |
|
46 | outfd, outname = tempfile.mkstemp(prefix='hgfout') | |
47 | os.close(outfd) |
|
47 | os.close(outfd) | |
48 | cmd = cmd.replace('INFILE', inname) |
|
48 | cmd = cmd.replace('INFILE', inname) | |
49 | cmd = cmd.replace('OUTFILE', outname) |
|
49 | cmd = cmd.replace('OUTFILE', outname) | |
50 | code = os.system(cmd) |
|
50 | code = os.system(cmd) | |
51 | if code: raise Abort(_("command '%s' failed: %s") % |
|
51 | if code: raise Abort(_("command '%s' failed: %s") % | |
52 | (cmd, explain_exit(code))) |
|
52 | (cmd, explain_exit(code))) | |
53 | return open(outname, 'rb').read() |
|
53 | return open(outname, 'rb').read() | |
54 | finally: |
|
54 | finally: | |
55 | try: |
|
55 | try: | |
56 | if inname: os.unlink(inname) |
|
56 | if inname: os.unlink(inname) | |
57 | except: pass |
|
57 | except: pass | |
58 | try: |
|
58 | try: | |
59 | if outname: os.unlink(outname) |
|
59 | if outname: os.unlink(outname) | |
60 | except: pass |
|
60 | except: pass | |
61 |
|
61 | |||
62 | filtertable = { |
|
62 | filtertable = { | |
63 | 'tempfile:': tempfilter, |
|
63 | 'tempfile:': tempfilter, | |
64 | 'pipe:': pipefilter, |
|
64 | 'pipe:': pipefilter, | |
65 | } |
|
65 | } | |
66 |
|
66 | |||
67 | def filter(s, cmd): |
|
67 | def filter(s, cmd): | |
68 | "filter a string through a command that transforms its input to its output" |
|
68 | "filter a string through a command that transforms its input to its output" | |
69 | for name, fn in filtertable.iteritems(): |
|
69 | for name, fn in filtertable.iteritems(): | |
70 | if cmd.startswith(name): |
|
70 | if cmd.startswith(name): | |
71 | return fn(s, cmd[len(name):].lstrip()) |
|
71 | return fn(s, cmd[len(name):].lstrip()) | |
72 | return pipefilter(s, cmd) |
|
72 | return pipefilter(s, cmd) | |
73 |
|
73 | |||
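A short usage sketch for the filter helpers above; it assumes a Unix-like system with tr and sort on the PATH, and the import name util is also an assumption:

    import util

    # a 'pipe:' prefix is stripped and the data is streamed through the command
    print util.filter("hello\n", "pipe: tr a-z A-Z")   # HELLO

    # with no recognised prefix the whole string is treated as a pipe command
    print util.filter("b\na\n", "sort")                # a, then b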
74 | def patch(strip, patchname, ui): |
|
74 | def patch(strip, patchname, ui): | |
75 | """apply the patch <patchname> to the working directory. |
|
75 | """apply the patch <patchname> to the working directory. | |
76 | a list of patched files is returned""" |
|
76 | a list of patched files is returned""" | |
77 | fp = os.popen('patch -p%d < "%s"' % (strip, patchname)) |
|
77 | fp = os.popen('patch -p%d < "%s"' % (strip, patchname)) | |
78 | files = {} |
|
78 | files = {} | |
79 | for line in fp: |
|
79 | for line in fp: | |
80 | line = line.rstrip() |
|
80 | line = line.rstrip() | |
81 | ui.status("%s\n" % line) |
|
81 | ui.status("%s\n" % line) | |
82 | if line.startswith('patching file '): |
|
82 | if line.startswith('patching file '): | |
83 | pf = parse_patch_output(line) |
|
83 | pf = parse_patch_output(line) | |
84 | files.setdefault(pf, 1) |
|
84 | files.setdefault(pf, 1) | |
85 | code = fp.close() |
|
85 | code = fp.close() | |
86 | if code: |
|
86 | if code: | |
87 | raise Abort(_("patch command failed: %s") % explain_exit(code)[0]) |
|
87 | raise Abort(_("patch command failed: %s") % explain_exit(code)[0]) | |
88 | return files.keys() |
|
88 | return files.keys() | |
89 |
|
89 | |||
90 | def binary(s): |
|
90 | def binary(s): | |
91 | """return true if a string is binary data using diff's heuristic""" |
|
91 | """return true if a string is binary data using diff's heuristic""" | |
92 | if s and '\0' in s[:4096]: |
|
92 | if s and '\0' in s[:4096]: | |
93 | return True |
|
93 | return True | |
94 | return False |
|
94 | return False | |
95 |
|
95 | |||
96 | def unique(g): |
|
96 | def unique(g): | |
97 | """return the uniq elements of iterable g""" |
|
97 | """return the uniq elements of iterable g""" | |
98 | seen = {} |
|
98 | seen = {} | |
99 | for f in g: |
|
99 | for f in g: | |
100 | if f not in seen: |
|
100 | if f not in seen: | |
101 | seen[f] = 1 |
|
101 | seen[f] = 1 | |
102 | yield f |
|
102 | yield f | |
103 |
|
103 | |||
104 | class Abort(Exception): |
|
104 | class Abort(Exception): | |
105 | """Raised if a command needs to print an error and exit.""" |
|
105 | """Raised if a command needs to print an error and exit.""" | |
106 |
|
106 | |||
107 | def always(fn): return True |
|
107 | def always(fn): return True | |
108 | def never(fn): return False |
|
108 | def never(fn): return False | |
109 |
|
109 | |||
110 | def patkind(name, dflt_pat='glob'): |
|
110 | def patkind(name, dflt_pat='glob'): | |
111 | """Split a string into an optional pattern kind prefix and the |
|
111 | """Split a string into an optional pattern kind prefix and the | |
112 | actual pattern.""" |
|
112 | actual pattern.""" | |
113 | for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre': |
|
113 | for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre': | |
114 | if name.startswith(prefix + ':'): return name.split(':', 1) |
|
114 | if name.startswith(prefix + ':'): return name.split(':', 1) | |
115 | return dflt_pat, name |
|
115 | return dflt_pat, name | |
116 |
|
116 | |||
117 | def globre(pat, head='^', tail='$'): |
|
117 | def globre(pat, head='^', tail='$'): | |
118 | "convert a glob pattern into a regexp" |
|
118 | "convert a glob pattern into a regexp" | |
119 | i, n = 0, len(pat) |
|
119 | i, n = 0, len(pat) | |
120 | res = '' |
|
120 | res = '' | |
121 | group = False |
|
121 | group = False | |
122 | def peek(): return i < n and pat[i] |
|
122 | def peek(): return i < n and pat[i] | |
123 | while i < n: |
|
123 | while i < n: | |
124 | c = pat[i] |
|
124 | c = pat[i] | |
125 | i = i+1 |
|
125 | i = i+1 | |
126 | if c == '*': |
|
126 | if c == '*': | |
127 | if peek() == '*': |
|
127 | if peek() == '*': | |
128 | i += 1 |
|
128 | i += 1 | |
129 | res += '.*' |
|
129 | res += '.*' | |
130 | else: |
|
130 | else: | |
131 | res += '[^/]*' |
|
131 | res += '[^/]*' | |
132 | elif c == '?': |
|
132 | elif c == '?': | |
133 | res += '.' |
|
133 | res += '.' | |
134 | elif c == '[': |
|
134 | elif c == '[': | |
135 | j = i |
|
135 | j = i | |
136 | if j < n and pat[j] in '!]': |
|
136 | if j < n and pat[j] in '!]': | |
137 | j += 1 |
|
137 | j += 1 | |
138 | while j < n and pat[j] != ']': |
|
138 | while j < n and pat[j] != ']': | |
139 | j += 1 |
|
139 | j += 1 | |
140 | if j >= n: |
|
140 | if j >= n: | |
141 | res += '\\[' |
|
141 | res += '\\[' | |
142 | else: |
|
142 | else: | |
143 | stuff = pat[i:j].replace('\\','\\\\') |
|
143 | stuff = pat[i:j].replace('\\','\\\\') | |
144 | i = j + 1 |
|
144 | i = j + 1 | |
145 | if stuff[0] == '!': |
|
145 | if stuff[0] == '!': | |
146 | stuff = '^' + stuff[1:] |
|
146 | stuff = '^' + stuff[1:] | |
147 | elif stuff[0] == '^': |
|
147 | elif stuff[0] == '^': | |
148 | stuff = '\\' + stuff |
|
148 | stuff = '\\' + stuff | |
149 | res = '%s[%s]' % (res, stuff) |
|
149 | res = '%s[%s]' % (res, stuff) | |
150 | elif c == '{': |
|
150 | elif c == '{': | |
151 | group = True |
|
151 | group = True | |
152 | res += '(?:' |
|
152 | res += '(?:' | |
153 | elif c == '}' and group: |
|
153 | elif c == '}' and group: | |
154 | res += ')' |
|
154 | res += ')' | |
155 | group = False |
|
155 | group = False | |
156 | elif c == ',' and group: |
|
156 | elif c == ',' and group: | |
157 | res += '|' |
|
157 | res += '|' | |
158 | elif c == '\\': |
|
158 | elif c == '\\': | |
159 | p = peek() |
|
159 | p = peek() | |
160 | if p: |
|
160 | if p: | |
161 | i += 1 |
|
161 | i += 1 | |
162 | res += re.escape(p) |
|
162 | res += re.escape(p) | |
163 | else: |
|
163 | else: | |
164 | res += re.escape(c) |
|
164 | res += re.escape(c) | |
165 | else: |
|
165 | else: | |
166 | res += re.escape(c) |
|
166 | res += re.escape(c) | |
167 | return head + res + tail |
|
167 | return head + res + tail | |
168 |
|
168 | |||
169 | _globchars = {'[': 1, '{': 1, '*': 1, '?': 1} |
|
169 | _globchars = {'[': 1, '{': 1, '*': 1, '?': 1} | |
170 |
|
170 | |||
171 | def pathto(n1, n2): |
|
171 | def pathto(n1, n2): | |
172 | '''return the relative path from one place to another. |
|
172 | '''return the relative path from one place to another. | |
173 | this returns a path in the form used by the local filesystem, not hg.''' |
|
173 | this returns a path in the form used by the local filesystem, not hg.''' | |
174 | if not n1: return localpath(n2) |
|
174 | if not n1: return localpath(n2) | |
175 | a, b = n1.split('/'), n2.split('/') |
|
175 | a, b = n1.split('/'), n2.split('/') | |
176 | a.reverse() |
|
176 | a.reverse() | |
177 | b.reverse() |
|
177 | b.reverse() | |
178 | while a and b and a[-1] == b[-1]: |
|
178 | while a and b and a[-1] == b[-1]: | |
179 | a.pop() |
|
179 | a.pop() | |
180 | b.pop() |
|
180 | b.pop() | |
181 | b.reverse() |
|
181 | b.reverse() | |
182 | return os.sep.join((['..'] * len(a)) + b) |
|
182 | return os.sep.join((['..'] * len(a)) + b) | |
183 |
|
183 | |||
184 | def canonpath(root, cwd, myname): |
|
184 | def canonpath(root, cwd, myname): | |
185 | """return the canonical path of myname, given cwd and root""" |
|
185 | """return the canonical path of myname, given cwd and root""" | |
186 | if root == os.sep: |
|
186 | if root == os.sep: | |
187 | rootsep = os.sep |
|
187 | rootsep = os.sep | |
188 | else: |
|
188 | else: | |
189 | rootsep = root + os.sep |
|
189 | rootsep = root + os.sep | |
190 | name = myname |
|
190 | name = myname | |
191 | if not name.startswith(os.sep): |
|
191 | if not name.startswith(os.sep): | |
192 | name = os.path.join(root, cwd, name) |
|
192 | name = os.path.join(root, cwd, name) | |
193 | name = os.path.normpath(name) |
|
193 | name = os.path.normpath(name) | |
194 | if name.startswith(rootsep): |
|
194 | if name.startswith(rootsep): | |
195 | name = name[len(rootsep):] |
|
195 | name = name[len(rootsep):] | |
196 | audit_path(name) |
|
196 | audit_path(name) | |
197 | return pconvert(name) |
|
197 | return pconvert(name) | |
198 | elif name == root: |
|
198 | elif name == root: | |
199 | return '' |
|
199 | return '' | |
200 | else: |
|
200 | else: | |
201 | raise Abort('%s not under root' % myname) |
|
201 | raise Abort('%s not under root' % myname) | |
202 |
|
202 | |||
203 | def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None): |
|
203 | def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None): | |
204 | return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src) |
|
204 | return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src) | |
205 |
|
205 | |||
206 | def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None): |
|
206 | def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None): | |
207 | if os.name == 'nt': |
|
207 | if os.name == 'nt': | |
208 | dflt_pat = 'glob' |
|
208 | dflt_pat = 'glob' | |
209 | else: |
|
209 | else: | |
210 | dflt_pat = 'relpath' |
|
210 | dflt_pat = 'relpath' | |
211 | return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src) |
|
211 | return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src) | |
212 |
|
212 | |||
213 | def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src): |
|
213 | def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src): | |
214 | """build a function to match a set of file patterns |
|
214 | """build a function to match a set of file patterns | |
215 |
|
215 | |||
216 | arguments: |
|
216 | arguments: | |
217 | canonroot - the canonical root of the tree you're matching against |
|
217 | canonroot - the canonical root of the tree you're matching against | |
218 | cwd - the current working directory, if relevant |
|
218 | cwd - the current working directory, if relevant | |
219 | names - patterns to find |
|
219 | names - patterns to find | |
220 | inc - patterns to include |
|
220 | inc - patterns to include | |
221 | exc - patterns to exclude |
|
221 | exc - patterns to exclude | |
222 | head - a regex to prepend to patterns to control whether a match is rooted |
|
222 | head - a regex to prepend to patterns to control whether a match is rooted | |
223 |
|
223 | |||
224 | a pattern is one of: |
|
224 | a pattern is one of: | |
225 | 'glob:<rooted glob>' |
|
225 | 'glob:<rooted glob>' | |
226 | 're:<rooted regexp>' |
|
226 | 're:<rooted regexp>' | |
227 | 'path:<rooted path>' |
|
227 | 'path:<rooted path>' | |
228 | 'relglob:<relative glob>' |
|
228 | 'relglob:<relative glob>' | |
229 | 'relpath:<relative path>' |
|
229 | 'relpath:<relative path>' | |
230 | 'relre:<relative regexp>' |
|
230 | 'relre:<relative regexp>' | |
231 | '<rooted path or regexp>' |
|
231 | '<rooted path or regexp>' | |
232 |
|
232 | |||
233 | returns: |
|
233 | returns: | |
234 | a 3-tuple containing |
|
234 | a 3-tuple containing | |
235 | - list of explicit non-pattern names passed in |
|
235 | - list of explicit non-pattern names passed in | |
236 | - a bool match(filename) function |
|
236 | - a bool match(filename) function | |
237 | - a bool indicating if any patterns were passed in |
|
237 | - a bool indicating if any patterns were passed in | |
238 |
|
238 | |||
239 | todo: |
|
239 | todo: | |
240 | make head regex a rooted bool |
|
240 | make head regex a rooted bool | |
241 | """ |
|
241 | """ | |
242 |
|
242 | |||
243 | def contains_glob(name): |
|
243 | def contains_glob(name): | |
244 | for c in name: |
|
244 | for c in name: | |
245 | if c in _globchars: return True |
|
245 | if c in _globchars: return True | |
246 | return False |
|
246 | return False | |
247 |
|
247 | |||
248 | def regex(kind, name, tail): |
|
248 | def regex(kind, name, tail): | |
249 | '''convert a pattern into a regular expression''' |
|
249 | '''convert a pattern into a regular expression''' | |
250 | if kind == 're': |
|
250 | if kind == 're': | |
251 | return name |
|
251 | return name | |
252 | elif kind == 'path': |
|
252 | elif kind == 'path': | |
253 | return '^' + re.escape(name) + '(?:/|$)' |
|
253 | return '^' + re.escape(name) + '(?:/|$)' | |
254 | elif kind == 'relglob': |
|
254 | elif kind == 'relglob': | |
255 | return head + globre(name, '(?:|.*/)', tail) |
|
255 | return head + globre(name, '(?:|.*/)', tail) | |
256 | elif kind == 'relpath': |
|
256 | elif kind == 'relpath': | |
257 | return head + re.escape(name) + tail |
|
257 | return head + re.escape(name) + tail | |
258 | elif kind == 'relre': |
|
258 | elif kind == 'relre': | |
259 | if name.startswith('^'): |
|
259 | if name.startswith('^'): | |
260 | return name |
|
260 | return name | |
261 | return '.*' + name |
|
261 | return '.*' + name | |
262 | return head + globre(name, '', tail) |
|
262 | return head + globre(name, '', tail) | |
263 |
|
263 | |||
264 | def matchfn(pats, tail): |
|
264 | def matchfn(pats, tail): | |
265 | """build a matching function from a set of patterns""" |
|
265 | """build a matching function from a set of patterns""" | |
266 | if not pats: |
|
266 | if not pats: | |
267 | return |
|
267 | return | |
268 | matches = [] |
|
268 | matches = [] | |
269 | for k, p in pats: |
|
269 | for k, p in pats: | |
270 | try: |
|
270 | try: | |
271 | pat = '(?:%s)' % regex(k, p, tail) |
|
271 | pat = '(?:%s)' % regex(k, p, tail) | |
272 | matches.append(re.compile(pat).match) |
|
272 | matches.append(re.compile(pat).match) | |
273 | except re.error: |
|
273 | except re.error: | |
274 | if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p)) |
|
274 | if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p)) | |
275 | else: raise Abort("invalid pattern (%s): %s" % (k, p)) |
|
275 | else: raise Abort("invalid pattern (%s): %s" % (k, p)) | |
276 |
|
276 | |||
277 | def buildfn(text): |
|
277 | def buildfn(text): | |
278 | for m in matches: |
|
278 | for m in matches: | |
279 | r = m(text) |
|
279 | r = m(text) | |
280 | if r: |
|
280 | if r: | |
281 | return r |
|
281 | return r | |
282 |
|
282 | |||
283 | return buildfn |
|
283 | return buildfn | |
284 |
|
284 | |||
285 | def globprefix(pat): |
|
285 | def globprefix(pat): | |
286 | '''return the non-glob prefix of a path, e.g. foo/* -> foo''' |
|
286 | '''return the non-glob prefix of a path, e.g. foo/* -> foo''' | |
287 | root = [] |
|
287 | root = [] | |
288 | for p in pat.split(os.sep): |
|
288 | for p in pat.split(os.sep): | |
289 | if contains_glob(p): break |
|
289 | if contains_glob(p): break | |
290 | root.append(p) |
|
290 | root.append(p) | |
291 | return '/'.join(root) |
|
291 | return '/'.join(root) | |
292 |
|
292 | |||
293 | pats = [] |
|
293 | pats = [] | |
294 | files = [] |
|
294 | files = [] | |
295 | roots = [] |
|
295 | roots = [] | |
296 | for kind, name in [patkind(p, dflt_pat) for p in names]: |
|
296 | for kind, name in [patkind(p, dflt_pat) for p in names]: | |
297 | if kind in ('glob', 'relpath'): |
|
297 | if kind in ('glob', 'relpath'): | |
298 | name = canonpath(canonroot, cwd, name) |
|
298 | name = canonpath(canonroot, cwd, name) | |
299 | if name == '': |
|
299 | if name == '': | |
300 | kind, name = 'glob', '**' |
|
300 | kind, name = 'glob', '**' | |
301 | if kind in ('glob', 'path', 're'): |
|
301 | if kind in ('glob', 'path', 're'): | |
302 | pats.append((kind, name)) |
|
302 | pats.append((kind, name)) | |
303 | if kind == 'glob': |
|
303 | if kind == 'glob': | |
304 | root = globprefix(name) |
|
304 | root = globprefix(name) | |
305 | if root: roots.append(root) |
|
305 | if root: roots.append(root) | |
306 | elif kind == 'relpath': |
|
306 | elif kind == 'relpath': | |
307 | files.append((kind, name)) |
|
307 | files.append((kind, name)) | |
308 | roots.append(name) |
|
308 | roots.append(name) | |
309 |
|
309 | |||
310 | patmatch = matchfn(pats, '$') or always |
|
310 | patmatch = matchfn(pats, '$') or always | |
311 | filematch = matchfn(files, '(?:/|$)') or always |
|
311 | filematch = matchfn(files, '(?:/|$)') or always | |
312 | incmatch = always |
|
312 | incmatch = always | |
313 | if inc: |
|
313 | if inc: | |
314 | incmatch = matchfn(map(patkind, inc), '(?:/|$)') |
|
314 | incmatch = matchfn(map(patkind, inc), '(?:/|$)') | |
315 | excmatch = lambda fn: False |
|
315 | excmatch = lambda fn: False | |
316 | if exc: |
|
316 | if exc: | |
317 | excmatch = matchfn(map(patkind, exc), '(?:/|$)') |
|
317 | excmatch = matchfn(map(patkind, exc), '(?:/|$)') | |
318 |
|
318 | |||
319 | return (roots, |
|
319 | return (roots, | |
320 | lambda fn: (incmatch(fn) and not excmatch(fn) and |
|
320 | lambda fn: (incmatch(fn) and not excmatch(fn) and | |
321 | (fn.endswith('/') or |
|
321 | (fn.endswith('/') or | |
322 | (not pats and not files) or |
|
322 | (not pats and not files) or | |
323 | (pats and patmatch(fn)) or |
|
323 | (pats and patmatch(fn)) or | |
324 | (files and filematch(fn)))), |
|
324 | (files and filematch(fn)))), | |
325 | (inc or exc or (pats and pats != [('glob', '**')])) and True) |
|
325 | (inc or exc or (pats and pats != [('glob', '**')])) and True) | |
326 |
|
326 | |||
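A small usage sketch for the pattern helpers defined above (patkind() and globre()); the file names are invented and the import name util is an assumption:

    import re
    import util

    # patkind() splits an optional kind prefix from the pattern itself
    print util.patkind('re:.*\.py$')      # ['re', '.*\\.py$']
    print util.patkind('src/*.c')         # ('glob', 'src/*.c')

    # globre() turns a glob into an anchored regexp; '*' stops at '/'
    pat = re.compile(util.globre('src/*.c'))
    print bool(pat.match('src/main.c'))   # True
    print bool(pat.match('src/sub/x.c'))  # False: '*' does not cross '/'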
327 | def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None): |
|
327 | def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None): | |
328 | '''enhanced shell command execution. |
|
328 | '''enhanced shell command execution. | |
329 | run with environment maybe modified, maybe in different dir. |
|
329 | run with environment maybe modified, maybe in different dir. | |
330 |
|
330 | |||
331 | if command fails and onerr is None, return status. if ui object, |
|
331 | if command fails and onerr is None, return status. if ui object, | |
332 | print error message and return status, else raise onerr object as |
|
332 | print error message and return status, else raise onerr object as | |
333 | exception.''' |
|
333 | exception.''' | |
334 | oldenv = {} |
|
334 | oldenv = {} | |
335 | for k in environ: |
|
335 | for k in environ: | |
336 | oldenv[k] = os.environ.get(k) |
|
336 | oldenv[k] = os.environ.get(k) | |
337 | if cwd is not None: |
|
337 | if cwd is not None: | |
338 | oldcwd = os.getcwd() |
|
338 | oldcwd = os.getcwd() | |
339 | try: |
|
339 | try: | |
340 | for k, v in environ.iteritems(): |
|
340 | for k, v in environ.iteritems(): | |
341 | os.environ[k] = str(v) |
|
341 | os.environ[k] = str(v) | |
342 | if cwd is not None and oldcwd != cwd: |
|
342 | if cwd is not None and oldcwd != cwd: | |
343 | os.chdir(cwd) |
|
343 | os.chdir(cwd) | |
344 | rc = os.system(cmd) |
|
344 | rc = os.system(cmd) | |
345 | if rc and onerr: |
|
345 | if rc and onerr: | |
346 | errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]), |
|
346 | errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]), | |
347 | explain_exit(rc)[0]) |
|
347 | explain_exit(rc)[0]) | |
348 | if errprefix: |
|
348 | if errprefix: | |
349 | errmsg = '%s: %s' % (errprefix, errmsg) |
|
349 | errmsg = '%s: %s' % (errprefix, errmsg) | |
350 | try: |
|
350 | try: | |
351 | onerr.warn(errmsg + '\n') |
|
351 | onerr.warn(errmsg + '\n') | |
352 | except AttributeError: |
|
352 | except AttributeError: | |
353 | raise onerr(errmsg) |
|
353 | raise onerr(errmsg) | |
354 | return rc |
|
354 | return rc | |
355 | finally: |
|
355 | finally: | |
356 | for k, v in oldenv.iteritems(): |
|
356 | for k, v in oldenv.iteritems(): | |
357 | if v is None: |
|
357 | if v is None: | |
358 | del os.environ[k] |
|
358 | del os.environ[k] | |
359 | else: |
|
359 | else: | |
360 | os.environ[k] = v |
|
360 | os.environ[k] = v | |
361 | if cwd is not None and oldcwd != cwd: |
|
361 | if cwd is not None and oldcwd != cwd: | |
362 | os.chdir(oldcwd) |
|
362 | os.chdir(oldcwd) | |
363 |
|
363 | |||
364 | def rename(src, dst): |
|
364 | def rename(src, dst): | |
365 | """forcibly rename a file""" |
|
365 | """forcibly rename a file""" | |
366 | try: |
|
366 | try: | |
367 | os.rename(src, dst) |
|
367 | os.rename(src, dst) | |
368 | except: |
|
368 | except: | |
369 | os.unlink(dst) |
|
369 | os.unlink(dst) | |
370 | os.rename(src, dst) |
|
370 | os.rename(src, dst) | |
371 |
|
371 | |||
372 | def unlink(f): |
|
372 | def unlink(f): | |
373 | """unlink and remove the directory if it is empty""" |
|
373 | """unlink and remove the directory if it is empty""" | |
374 | os.unlink(f) |
|
374 | os.unlink(f) | |
375 | # try removing directories that might now be empty |
|
375 | # try removing directories that might now be empty | |
376 | try: os.removedirs(os.path.dirname(f)) |
|
376 | try: os.removedirs(os.path.dirname(f)) | |
377 | except: pass |
|
377 | except: pass | |
378 |
|
378 | |||
379 | def copyfiles(src, dst, hardlink=None): |
|
379 | def copyfiles(src, dst, hardlink=None): | |
380 | """Copy a directory tree using hardlinks if possible""" |
|
380 | """Copy a directory tree using hardlinks if possible""" | |
381 |
|
381 | |||
382 | if hardlink is None: |
|
382 | if hardlink is None: | |
383 | hardlink = (os.stat(src).st_dev == |
|
383 | hardlink = (os.stat(src).st_dev == | |
384 | os.stat(os.path.dirname(dst)).st_dev) |
|
384 | os.stat(os.path.dirname(dst)).st_dev) | |
385 |
|
385 | |||
386 | if os.path.isdir(src): |
|
386 | if os.path.isdir(src): | |
387 | os.mkdir(dst) |
|
387 | os.mkdir(dst) | |
388 | for name in os.listdir(src): |
|
388 | for name in os.listdir(src): | |
389 | srcname = os.path.join(src, name) |
|
389 | srcname = os.path.join(src, name) | |
390 | dstname = os.path.join(dst, name) |
|
390 | dstname = os.path.join(dst, name) | |
391 | copyfiles(srcname, dstname, hardlink) |
|
391 | copyfiles(srcname, dstname, hardlink) | |
392 | else: |
|
392 | else: | |
393 | if hardlink: |
|
393 | if hardlink: | |
394 | try: |
|
394 | try: | |
395 | os_link(src, dst) |
|
395 | os_link(src, dst) | |
396 | except: |
|
396 | except: | |
397 | hardlink = False |
|
397 | hardlink = False | |
398 | shutil.copy(src, dst) |
|
398 | shutil.copy(src, dst) | |
399 | else: |
|
399 | else: | |
400 | shutil.copy(src, dst) |
|
400 | shutil.copy(src, dst) | |
401 |
|
401 | |||
402 | def audit_path(path): |
|
402 | def audit_path(path): | |
403 | """Abort if path contains dangerous components""" |
|
403 | """Abort if path contains dangerous components""" | |
404 | parts = os.path.normcase(path).split(os.sep) |
|
404 | parts = os.path.normcase(path).split(os.sep) | |
405 | if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '') |
|
405 | if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '') | |
406 | or os.pardir in parts): |
|
406 | or os.pardir in parts): | |
407 | raise Abort(_("path contains illegal component: %s\n") % path) |
|
407 | raise Abort(_("path contains illegal component: %s\n") % path) | |
408 |
|
408 | |||
409 | def opener(base, audit=True): |
|
409 | def opener(base, audit=True): | |
410 | """ |
|
410 | """ | |
411 | return a function that opens files relative to base |
|
411 | return a function that opens files relative to base | |
412 |
|
412 | |||
413 | this function is used to hide the details of COW semantics and |
|
413 | this function is used to hide the details of COW semantics and | |
414 | remote file access from higher level code. |
|
414 | remote file access from higher level code. | |
415 | """ |
|
415 | """ | |
416 | p = base |
|
416 | p = base | |
417 | audit_p = audit |
|
417 | audit_p = audit | |
418 |
|
418 | |||
419 | def mktempcopy(name): |
|
419 | def mktempcopy(name): | |
420 | d, fn = os.path.split(name) |
|
420 | d, fn = os.path.split(name) | |
421 | fd, temp = tempfile.mkstemp(prefix=fn, dir=d) |
|
421 | fd, temp = tempfile.mkstemp(prefix=fn, dir=d) | |
422 | fp = os.fdopen(fd, "wb") |
|
422 | fp = os.fdopen(fd, "wb") | |
423 | try: |
|
423 | try: | |
424 | fp.write(file(name, "rb").read()) |
|
424 | fp.write(file(name, "rb").read()) | |
425 | except: |
|
425 | except: | |
426 | try: os.unlink(temp) |
|
426 | try: os.unlink(temp) | |
427 | except: pass |
|
427 | except: pass | |
428 | raise |
|
428 | raise | |
429 | fp.close() |
|
429 | fp.close() | |
430 | st = os.lstat(name) |
|
430 | st = os.lstat(name) | |
431 | os.chmod(temp, st.st_mode) |
|
431 | os.chmod(temp, st.st_mode) | |
432 | return temp |
|
432 | return temp | |
433 |
|
433 | |||
434 | class atomicfile(file): |
|
434 | class atomictempfile(file): | |
435 | """the file will only be copied on close""" |
|
435 | """the file will only be copied when rename is called""" | |
436 | def __init__(self, name, mode |
|
436 | def __init__(self, name, mode): | |
437 | self.__name = name |
|
437 | self.__name = name | |
438 | self.temp = mktempcopy(name) |
|
438 | self.temp = mktempcopy(name) | |
439 | file.__init__(self, self.temp, mode) |
|
439 | file.__init__(self, self.temp, mode) | |
440 | def close(self): |
|
440 | def rename(self): | |
441 | if not self.closed: |
|
441 | if not self.closed: | |
442 | file.close(self) |
|
442 | file.close(self) | |
443 | rename(self.temp, self.__name) |
|
443 | rename(self.temp, self.__name) | |
444 | def __del__(self): |
|
444 | def __del__(self): | |
445 | self.close() |
|
445 | if not self.closed: | |
|
446 | try: | |||
|
447 | os.unlink(self.temp) | |||
|
448 | except: pass | |||
|
449 | file.close(self) | |||
446 |
|
450 | |||
447 | def o(path, mode="r", text=False, atomic=False): |
|
451 | class atomicfile(atomictempfile): | |
|
452 | """the file will only be copied on close""" | |||
|
453 | def __init__(self, name, mode): | |||
|
454 | atomictempfile.__init__(self, name, mode) | |||
|
455 | def close(self): | |||
|
456 | self.rename() | |||
|
457 | def __del__(self): | |||
|
458 | self.rename() | |||
|
459 | ||||
|
460 | def o(path, mode="r", text=False, atomic=False, atomictemp=False): | |||
448 | if audit_p: |
|
461 | if audit_p: | |
449 | audit_path(path) |
|
462 | audit_path(path) | |
450 | f = os.path.join(p, path) |
|
463 | f = os.path.join(p, path) | |
451 |
|
464 | |||
452 | if not text: |
|
465 | if not text: | |
453 | mode += "b" # for that other OS |
|
466 | mode += "b" # for that other OS | |
454 |
|
467 | |||
455 | if mode[0] != "r": |
|
468 | if mode[0] != "r": | |
456 | try: |
|
469 | try: | |
457 | nlink = nlinks(f) |
|
470 | nlink = nlinks(f) | |
458 | except OSError: |
|
471 | except OSError: | |
459 | d = os.path.dirname(f) |
|
472 | d = os.path.dirname(f) | |
460 | if not os.path.isdir(d): |
|
473 | if not os.path.isdir(d): | |
461 | os.makedirs(d) |
|
474 | os.makedirs(d) | |
462 | else: |
|
475 | else: | |
463 | if atomic: |
|
476 | if atomic: | |
464 | return atomicfile(f, mode) |
|
477 | return atomicfile(f, mode) | |
|
478 | elif atomictemp: | |||
|
479 | return atomictempfile(f, mode) | |||
465 | if nlink > 1: |
|
480 | if nlink > 1: | |
466 | rename(mktempcopy(f), f) |
|
481 | rename(mktempcopy(f), f) | |
467 | return file(f, mode) |
|
482 | return file(f, mode) | |
468 |
|
483 | |||
469 | return o |
|
484 | return o | |
470 |
|
485 | |||
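A usage sketch for the two atomic modes that opener() now exposes; the paths are invented, and note that the copy-and-rename path is only taken when the target file already exists (otherwise a plain file object is returned):

    op = opener("/tmp/repo/.hg")

    # atomic=True: writes go to a temporary copy that replaces the original
    # automatically when the file object is closed
    f = op("existing-file", "w", atomic=True)
    f.write("all or nothing\n")
    f.close()                     # the rename to the real name happens here

    # atomictemp=True: nothing is published until rename() is called;
    # dropping the object without calling rename() discards the temporary copy
    f = op("other-existing-file", "w", atomictemp=True)
    f.write("only visible after rename\n")
    f.rename()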
471 | def _makelock_file(info, pathname): |
|
486 | def _makelock_file(info, pathname): | |
472 | ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL) |
|
487 | ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL) | |
473 | os.write(ld, info) |
|
488 | os.write(ld, info) | |
474 | os.close(ld) |
|
489 | os.close(ld) | |
475 |
|
490 | |||
476 | def _readlock_file(pathname): |
|
491 | def _readlock_file(pathname): | |
477 | return file(pathname).read() |
|
492 | return file(pathname).read() | |
478 |
|
493 | |||
479 | def nlinks(pathname): |
|
494 | def nlinks(pathname): | |
480 | """Return number of hardlinks for the given file.""" |
|
495 | """Return number of hardlinks for the given file.""" | |
481 | return os.stat(pathname).st_nlink |
|
496 | return os.stat(pathname).st_nlink | |
482 |
|
497 | |||
483 | if hasattr(os, 'link'): |
|
498 | if hasattr(os, 'link'): | |
484 | os_link = os.link |
|
499 | os_link = os.link | |
485 | else: |
|
500 | else: | |
486 | def os_link(src, dst): |
|
501 | def os_link(src, dst): | |
487 | raise OSError(0, _("Hardlinks not supported")) |
|
502 | raise OSError(0, _("Hardlinks not supported")) | |
488 |
|
503 | |||
489 | # Platform specific variants |
|
504 | # Platform specific variants | |
490 | if os.name == 'nt': |
|
505 | if os.name == 'nt': | |
491 | demandload(globals(), "msvcrt") |
|
506 | demandload(globals(), "msvcrt") | |
492 | nulldev = 'NUL:' |
|
507 | nulldev = 'NUL:' | |
493 |
|
508 | |||
494 | class winstdout: |
|
509 | class winstdout: | |
495 | '''stdout on windows misbehaves if sent through a pipe''' |
|
510 | '''stdout on windows misbehaves if sent through a pipe''' | |
496 |
|
511 | |||
497 | def __init__(self, fp): |
|
512 | def __init__(self, fp): | |
498 | self.fp = fp |
|
513 | self.fp = fp | |
499 |
|
514 | |||
500 | def __getattr__(self, key): |
|
515 | def __getattr__(self, key): | |
501 | return getattr(self.fp, key) |
|
516 | return getattr(self.fp, key) | |
502 |
|
517 | |||
503 | def close(self): |
|
518 | def close(self): | |
504 | try: |
|
519 | try: | |
505 | self.fp.close() |
|
520 | self.fp.close() | |
506 | except: pass |
|
521 | except: pass | |
507 |
|
522 | |||
508 | def write(self, s): |
|
523 | def write(self, s): | |
509 | try: |
|
524 | try: | |
510 | return self.fp.write(s) |
|
525 | return self.fp.write(s) | |
511 | except IOError, inst: |
|
526 | except IOError, inst: | |
512 | if inst.errno != 0: raise |
|
527 | if inst.errno != 0: raise | |
513 | self.close() |
|
528 | self.close() | |
514 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
529 | raise IOError(errno.EPIPE, 'Broken pipe') | |
515 |
|
530 | |||
516 | sys.stdout = winstdout(sys.stdout) |
|
531 | sys.stdout = winstdout(sys.stdout) | |
517 |
|
532 | |||
518 | def os_rcpath(): |
|
533 | def os_rcpath(): | |
519 | '''return default os-specific hgrc search path''' |
|
534 | '''return default os-specific hgrc search path''' | |
520 | try: |
|
535 | try: | |
521 | import win32api, win32process |
|
536 | import win32api, win32process | |
522 | proc = win32api.GetCurrentProcess() |
|
537 | proc = win32api.GetCurrentProcess() | |
523 | filename = win32process.GetModuleFileNameEx(proc, 0) |
|
538 | filename = win32process.GetModuleFileNameEx(proc, 0) | |
524 | systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini') |
|
539 | systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini') | |
525 | except ImportError: |
|
540 | except ImportError: | |
526 | systemrc = r'c:\mercurial\mercurial.ini' |
|
541 | systemrc = r'c:\mercurial\mercurial.ini' | |
527 |
|
542 | |||
528 | return [systemrc, |
|
543 | return [systemrc, | |
529 | os.path.join(os.path.expanduser('~'), 'mercurial.ini')] |
|
544 | os.path.join(os.path.expanduser('~'), 'mercurial.ini')] | |
530 |
|
545 | |||
531 | def parse_patch_output(output_line): |
|
546 | def parse_patch_output(output_line): | |
532 | """parses the output produced by patch and returns the file name""" |
|
547 | """parses the output produced by patch and returns the file name""" | |
533 | pf = output_line[14:] |
|
548 | pf = output_line[14:] | |
534 | if pf[0] == '`': |
|
549 | if pf[0] == '`': | |
535 | pf = pf[1:-1] # Remove the quotes |
|
550 | pf = pf[1:-1] # Remove the quotes | |
536 | return pf |
|
551 | return pf | |
537 |
|
552 | |||
538 | try: # Mark Hammond's win32all package allows better functionality on Windows |
|
553 | try: # Mark Hammond's win32all package allows better functionality on Windows | |
539 | import win32api, win32con, win32file, pywintypes |
|
554 | import win32api, win32con, win32file, pywintypes | |
540 |
|
555 | |||
541 | # create hard links using win32file module |
|
556 | # create hard links using win32file module | |
542 | def os_link(src, dst): # NB will only succeed on NTFS |
|
557 | def os_link(src, dst): # NB will only succeed on NTFS | |
543 | win32file.CreateHardLink(dst, src) |
|
558 | win32file.CreateHardLink(dst, src) | |
544 |
|
559 | |||
545 | def nlinks(pathname): |
|
560 | def nlinks(pathname): | |
546 | """Return number of hardlinks for the given file.""" |
|
561 | """Return number of hardlinks for the given file.""" | |
547 | try: |
|
562 | try: | |
548 | fh = win32file.CreateFile(pathname, |
|
563 | fh = win32file.CreateFile(pathname, | |
549 | win32file.GENERIC_READ, win32file.FILE_SHARE_READ, |
|
564 | win32file.GENERIC_READ, win32file.FILE_SHARE_READ, | |
550 | None, win32file.OPEN_EXISTING, 0, None) |
|
565 | None, win32file.OPEN_EXISTING, 0, None) | |
551 | res = win32file.GetFileInformationByHandle(fh) |
|
566 | res = win32file.GetFileInformationByHandle(fh) | |
552 | fh.Close() |
|
567 | fh.Close() | |
553 | return res[7] |
|
568 | return res[7] | |
554 | except: |
|
569 | except: | |
555 | return os.stat(pathname).st_nlink |
|
570 | return os.stat(pathname).st_nlink | |
556 |
|
571 | |||
557 | def testpid(pid): |
|
572 | def testpid(pid): | |
558 | '''return True if pid is still running or unable to |
|
573 | '''return True if pid is still running or unable to | |
559 | determine, False otherwise''' |
|
574 | determine, False otherwise''' | |
560 | try: |
|
575 | try: | |
561 | import win32process, winerror |
|
576 | import win32process, winerror | |
562 | handle = win32api.OpenProcess( |
|
577 | handle = win32api.OpenProcess( | |
563 | win32con.PROCESS_QUERY_INFORMATION, False, pid) |
|
578 | win32con.PROCESS_QUERY_INFORMATION, False, pid) | |
564 | if handle: |
|
579 | if handle: | |
565 | status = win32process.GetExitCodeProcess(handle) |
|
580 | status = win32process.GetExitCodeProcess(handle) | |
566 | return status == win32con.STILL_ACTIVE |
|
581 | return status == win32con.STILL_ACTIVE | |
567 | except pywintypes.error, details: |
|
582 | except pywintypes.error, details: | |
568 | return details[0] != winerror.ERROR_INVALID_PARAMETER |
|
583 | return details[0] != winerror.ERROR_INVALID_PARAMETER | |
569 | return True |
|
584 | return True | |
570 |
|
585 | |||
571 | except ImportError: |
|
586 | except ImportError: | |
572 | def testpid(pid): |
|
587 | def testpid(pid): | |
573 | '''return False if pid dead, True if running or not known''' |
|
588 | '''return False if pid dead, True if running or not known''' | |
574 | return True |
|
589 | return True | |
575 |
|
590 | |||
    def is_exec(f, last):
        return last

    def set_exec(f, mode):
        pass

    def set_binary(fd):
        msvcrt.setmode(fd.fileno(), os.O_BINARY)

    def pconvert(path):
        return path.replace("\\", "/")

    def localpath(path):
        return path.replace('/', '\\')

    def normpath(path):
        return pconvert(os.path.normpath(path))

    makelock = _makelock_file
    readlock = _readlock_file

    def explain_exit(code):
        return _("exited with status %d") % code, code

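set_binary() matters because Windows file objects default to text mode, where every '\n' written is translated to '\r\n'. A minimal sketch, assuming the Windows branch above (where msvcrt is available to the module):

import sys

set_binary(sys.stdout)           # switch the CRT descriptor to binary mode
sys.stdout.write('\x00\x01\n')   # bytes pass through without CRLF translation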
else:
    nulldev = '/dev/null'

    def rcfiles(path):
        rcs = [os.path.join(path, 'hgrc')]
        rcdir = os.path.join(path, 'hgrc.d')
        try:
            rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
                        if f.endswith(".rc")])
        except OSError, inst: pass
        return rcs

    def os_rcpath():
        '''return default os-specific hgrc search path'''
        path = []
        if len(sys.argv) > 0:
            path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
                                '/../etc/mercurial'))
        path.extend(rcfiles('/etc/mercurial'))
        path.append(os.path.expanduser('~/.hgrc'))
        path = [os.path.normpath(f) for f in path]
        return path

    def parse_patch_output(output_line):
        """parses the output produced by patch and returns the file name"""
        pf = output_line[14:]
        if pf.startswith("'") and pf.endswith("'") and pf.find(" ") >= 0:
            pf = pf[1:-1] # Remove the quotes
        return pf

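GNU patch reports progress with lines such as "patching file foo.c"; the fixed prefix "patching file " is exactly 14 characters, which is what the [14:] slice strips, and the quote test handles names that patch wraps in single quotes because they contain spaces. Hypothetical inputs:

parse_patch_output("patching file hgext/mq.py")     # -> 'hgext/mq.py'
parse_patch_output("patching file 'my file.txt'")   # -> 'my file.txt'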
    def is_exec(f, last):
        """check whether a file is executable"""
        return (os.stat(f).st_mode & 0100 != 0)

    def set_exec(f, mode):
        s = os.stat(f).st_mode
        if (s & 0100 != 0) == mode:
            return
        if mode:
            # Turn on +x for every +r bit when making a file executable
            # and obey umask.
            umask = os.umask(0)
            os.umask(umask)
            os.chmod(f, s | (s & 0444) >> 2 & ~umask)
        else:
            os.chmod(f, s & 0666)

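The chmod expression copies each read bit onto the corresponding execute bit and then honours the umask. A worked example with illustrative values:

# file mode s = 0644, umask = 022
#   s & 0444          -> 0444   (the read bits)
#   (s & 0444) >> 2   -> 0111   (read bits shifted onto the execute bits)
#   0111 & ~umask     -> 0111   (umask 022 clears no execute bits)
#   s | 0111          -> 0755   (mode handed to os.chmod)
# With umask 077 the same file would end up as 0744 instead.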
    def set_binary(fd):
        pass

    def pconvert(path):
        return path

    def localpath(path):
        return path

    normpath = os.path.normpath

    def makelock(info, pathname):
        try:
            os.symlink(info, pathname)
        except OSError, why:
            if why.errno == errno.EEXIST:
                raise
            else:
                _makelock_file(info, pathname)

    def readlock(pathname):
        try:
            return os.readlink(pathname)
        except OSError, why:
            if why.errno == errno.EINVAL:
                return _readlock_file(pathname)
            else:
                raise

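On POSIX the lock holder's info becomes the target of a dangling symlink, which the kernel creates atomically; the EEXIST re-raise signals that somebody else already holds the lock, while other errors (e.g. a filesystem without symlinks) fall back to _makelock_file(), defined earlier in this file. A minimal usage sketch (illustrative only, not the repository's real locking code; lockname and the protected work are hypothetical):

lockname = '.hg/lock'
makelock(str(os.getpid()), lockname)   # raises OSError(EEXIST) if already held
try:
    pass                               # ... do the protected work here ...
finally:
    os.unlink(lockname)                # release the lock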
    def testpid(pid):
        '''return False if pid dead, True if running or not sure'''
        try:
            os.kill(pid, 0)
            return True
        except OSError, inst:
            return inst.errno != errno.ESRCH

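os.kill(pid, 0) delivers no signal; it only performs the existence and permission checks, so ESRCH means the process is gone while EPERM (someone else's live process) still counts as running. Combined with readlock() this gives stale-lock detection; an illustrative helper, not part of this module, assuming the lock stores a bare pid as in the sketch above:

def islockstale(lockname):
    try:
        pid = int(readlock(lockname))
    except (OSError, ValueError):
        return False                 # no lock, or lock contents not a plain pid
    return not testpid(pid)          # stale if that pid is no longer running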
    def explain_exit(code):
        """return a 2-tuple (desc, code) describing a process's status"""
        if os.WIFEXITED(code):
            val = os.WEXITSTATUS(code)
            return _("exited with status %d") % val, val
        elif os.WIFSIGNALED(code):
            val = os.WTERMSIG(code)
            return _("killed by signal %d") % val, val
        elif os.WIFSTOPPED(code):
            val = os.WSTOPSIG(code)
            return _("stopped by signal %d") % val, val
        raise ValueError(_("invalid exit code"))

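This variant decodes a raw wait()-style status word. On POSIX, os.system() returns exactly such a status, so a hedged usage sketch looks like:

status = os.system('false')          # shell command that exits with 1
desc, code = explain_exit(status)
# desc == 'exited with status 1', code == 1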
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter, targetsize = 2**16):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.in_iter = iter(in_iter)
        self.buf = ''
        self.targetsize = int(targetsize)
        if self.targetsize <= 0:
            raise ValueError(_("targetsize must be greater than 0, was %d") %
                             targetsize)
        self.iterempty = False

    def fillbuf(self):
        """Ignore target size; read every chunk from iterator until empty."""
        if not self.iterempty:
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            for ch in self.in_iter:
                collector.write(ch)
            self.buf = collector.getvalue()
            self.iterempty = True

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and not self.iterempty:
            # Clamp to a multiple of self.targetsize
            targetsize = self.targetsize * ((l // self.targetsize) + 1)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.in_iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                self.iterempty = True
            self.buf = collector.getvalue()
        s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s

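chunkbuffer gives a file-like read(n) on top of an iterator whose chunks arrive in unpredictable sizes, for example data coming off the wire. A usage sketch with toy data:

pieces = ['ab', 'cdef', 'g', 'hijklm']
buf = chunkbuffer(iter(pieces), targetsize=4)
buf.read(4)      # -> 'abcd'
buf.read(4)      # -> 'efgh'
buf.read(100)    # -> 'ijklm' (shorter than asked for: the iterator ran dry)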
def filechunkiter(f, size = 65536):
    """Create a generator that produces all the data in the file size
    (default 65536) bytes at a time.  Chunks may be less than size
    bytes if the chunk is the last chunk in the file, or the file is a
    socket or some other type of file that sometimes reads less data
    than is requested."""
    s = f.read(size)
    while len(s) > 0:
        yield s
        s = f.read(size)

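filechunkiter is the producing half: it turns a file object into the kind of chunk iterator chunkbuffer consumes, and it also works on its own for bounded-memory copies. A sketch with hypothetical file names:

src = open('input.bin', 'rb')
dst = open('output.bin', 'wb')
for chunk in filechunkiter(src, size=32768):
    dst.write(chunk)
dst.close()
src.close()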
def makedate():
    lt = time.localtime()
    if lt[8] == 1 and time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    return time.mktime(lt), tz

def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    s = time.strftime(format, time.gmtime(float(t) - tz))
    if timezone:
        s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
    return s

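makedate() returns the local time as a float plus the zone's offset from UTC in seconds (positive west of UTC, matching time.timezone), and datestr() renders that pair. A worked example with a hypothetical date; for a zone two hours east of UTC the stored offset is -7200:

when = (1133913600.0, -7200)        # 2005-12-07 00:00:00 UTC, rendered in UTC+2
datestr(when)                       # -> 'Wed Dec 07 02:00:00 2005 +0200'
datestr(when, format='%Y-%m-%d', timezone=False)   # -> '2005-12-07'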
def shortuser(user):
    """Return a short representation of a user name or email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f+1:]
    return user

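Illustrative inputs and results:

shortuser('John Doe <john@example.com>')   # -> 'john'
shortuser('mpm@selenic.com')               # -> 'mpm'
shortuser('just-a-login')                  # -> 'just-a-login'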
def walkrepos(path):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        if err.filename == path:
            raise err

    for root, dirs, files in os.walk(path, onerror=errhandler):
        for d in dirs:
            if d == '.hg':
                yield root
                dirs[:] = []
                break

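Because dirs[:] = [] prunes the walk at every repository root, repositories nested inside another repository's working directory are not reported. A usage sketch with a hypothetical publishing root:

for repo in walkrepos('/srv/repositories'):
    print repo                      # e.g. '/srv/repositories/project-a'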
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p: continue
                if os.path.isdir(p):
                    for f in os.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath
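
The result is cached in the module-level _rcpath, so HGRCPATH has to be set before the first call. Entries are split on os.pathsep; a directory expands to its *.rc files, a plain entry is used as-is, and an empty HGRCPATH leaves only the per-repository .hg/hgrc (which is handled elsewhere). A sketch with hypothetical paths:

os.environ['HGRCPATH'] = '/etc/mercurial/hgrc.d' + os.pathsep + '/home/user/.hgrc'
print rcpath()   # e.g. ['/etc/mercurial/hgrc.d/paths.rc', '/home/user/.hgrc']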