##// END OF EJS Templates
put license and copyright info into comment blocks
Martin Geisler -
r8226:8b2cd04a default
parent child Browse files
Show More
@@ -1,298 +1,296 b''
1 """
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 bundlerepo.py - repository class for viewing uncompressed bundles
2 #
3
3 # This provides a read-only repository interface to bundles as if
4 This provides a read-only repository interface to bundles as if
4 # they were part of the actual repository.
5 they were part of the actual repository.
5 #
6
6 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
7 Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
7 #
8
8 # This software may be used and distributed according to the terms of the
9 This software may be used and distributed according to the terms of the
9 # GNU General Public License version 2, incorporated herein by reference.
10 GNU General Public License version 2, incorporated herein by reference.
11 """
12
10
13 from node import nullid
11 from node import nullid
14 from i18n import _
12 from i18n import _
15 import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff
13 import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff
16 import localrepo, changelog, manifest, filelog, revlog, error
14 import localrepo, changelog, manifest, filelog, revlog, error
17
15
18 class bundlerevlog(revlog.revlog):
16 class bundlerevlog(revlog.revlog):
19 def __init__(self, opener, indexfile, bundlefile,
17 def __init__(self, opener, indexfile, bundlefile,
20 linkmapper=None):
18 linkmapper=None):
21 # How it works:
19 # How it works:
22 # to retrieve a revision, we need to know the offset of
20 # to retrieve a revision, we need to know the offset of
23 # the revision in the bundlefile (an opened file).
21 # the revision in the bundlefile (an opened file).
24 #
22 #
25 # We store this offset in the index (start), to differentiate a
23 # We store this offset in the index (start), to differentiate a
26 # rev in the bundle and from a rev in the revlog, we check
24 # rev in the bundle and from a rev in the revlog, we check
27 # len(index[r]). If the tuple is bigger than 7, it is a bundle
25 # len(index[r]). If the tuple is bigger than 7, it is a bundle
28 # (it is bigger since we store the node to which the delta is)
26 # (it is bigger since we store the node to which the delta is)
29 #
27 #
30 revlog.revlog.__init__(self, opener, indexfile)
28 revlog.revlog.__init__(self, opener, indexfile)
31 self.bundlefile = bundlefile
29 self.bundlefile = bundlefile
32 self.basemap = {}
30 self.basemap = {}
33 def chunkpositer():
31 def chunkpositer():
34 for chunk in changegroup.chunkiter(bundlefile):
32 for chunk in changegroup.chunkiter(bundlefile):
35 pos = bundlefile.tell()
33 pos = bundlefile.tell()
36 yield chunk, pos - len(chunk)
34 yield chunk, pos - len(chunk)
37 n = len(self)
35 n = len(self)
38 prev = None
36 prev = None
39 for chunk, start in chunkpositer():
37 for chunk, start in chunkpositer():
40 size = len(chunk)
38 size = len(chunk)
41 if size < 80:
39 if size < 80:
42 raise util.Abort(_("invalid changegroup"))
40 raise util.Abort(_("invalid changegroup"))
43 start += 80
41 start += 80
44 size -= 80
42 size -= 80
45 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
43 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
46 if node in self.nodemap:
44 if node in self.nodemap:
47 prev = node
45 prev = node
48 continue
46 continue
49 for p in (p1, p2):
47 for p in (p1, p2):
50 if not p in self.nodemap:
48 if not p in self.nodemap:
51 raise error.LookupError(p1, self.indexfile,
49 raise error.LookupError(p1, self.indexfile,
52 _("unknown parent"))
50 _("unknown parent"))
53 if linkmapper is None:
51 if linkmapper is None:
54 link = n
52 link = n
55 else:
53 else:
56 link = linkmapper(cs)
54 link = linkmapper(cs)
57
55
58 if not prev:
56 if not prev:
59 prev = p1
57 prev = p1
60 # start, size, full unc. size, base (unused), link, p1, p2, node
58 # start, size, full unc. size, base (unused), link, p1, p2, node
61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
59 e = (revlog.offset_type(start, 0), size, -1, -1, link,
62 self.rev(p1), self.rev(p2), node)
60 self.rev(p1), self.rev(p2), node)
63 self.basemap[n] = prev
61 self.basemap[n] = prev
64 self.index.insert(-1, e)
62 self.index.insert(-1, e)
65 self.nodemap[node] = n
63 self.nodemap[node] = n
66 prev = node
64 prev = node
67 n += 1
65 n += 1
68
66
69 def bundle(self, rev):
67 def bundle(self, rev):
70 """is rev from the bundle"""
68 """is rev from the bundle"""
71 if rev < 0:
69 if rev < 0:
72 return False
70 return False
73 return rev in self.basemap
71 return rev in self.basemap
74 def bundlebase(self, rev): return self.basemap[rev]
72 def bundlebase(self, rev): return self.basemap[rev]
75 def chunk(self, rev, df=None, cachelen=4096):
73 def chunk(self, rev, df=None, cachelen=4096):
76 # Warning: in case of bundle, the diff is against bundlebase,
74 # Warning: in case of bundle, the diff is against bundlebase,
77 # not against rev - 1
75 # not against rev - 1
78 # XXX: could use some caching
76 # XXX: could use some caching
79 if not self.bundle(rev):
77 if not self.bundle(rev):
80 return revlog.revlog.chunk(self, rev, df)
78 return revlog.revlog.chunk(self, rev, df)
81 self.bundlefile.seek(self.start(rev))
79 self.bundlefile.seek(self.start(rev))
82 return self.bundlefile.read(self.length(rev))
80 return self.bundlefile.read(self.length(rev))
83
81
84 def revdiff(self, rev1, rev2):
82 def revdiff(self, rev1, rev2):
85 """return or calculate a delta between two revisions"""
83 """return or calculate a delta between two revisions"""
86 if self.bundle(rev1) and self.bundle(rev2):
84 if self.bundle(rev1) and self.bundle(rev2):
87 # hot path for bundle
85 # hot path for bundle
88 revb = self.rev(self.bundlebase(rev2))
86 revb = self.rev(self.bundlebase(rev2))
89 if revb == rev1:
87 if revb == rev1:
90 return self.chunk(rev2)
88 return self.chunk(rev2)
91 elif not self.bundle(rev1) and not self.bundle(rev2):
89 elif not self.bundle(rev1) and not self.bundle(rev2):
92 return revlog.revlog.revdiff(self, rev1, rev2)
90 return revlog.revlog.revdiff(self, rev1, rev2)
93
91
94 return mdiff.textdiff(self.revision(self.node(rev1)),
92 return mdiff.textdiff(self.revision(self.node(rev1)),
95 self.revision(self.node(rev2)))
93 self.revision(self.node(rev2)))
96
94
97 def revision(self, node):
95 def revision(self, node):
98 """return an uncompressed revision of a given"""
96 """return an uncompressed revision of a given"""
99 if node == nullid: return ""
97 if node == nullid: return ""
100
98
101 text = None
99 text = None
102 chain = []
100 chain = []
103 iter_node = node
101 iter_node = node
104 rev = self.rev(iter_node)
102 rev = self.rev(iter_node)
105 # reconstruct the revision if it is from a changegroup
103 # reconstruct the revision if it is from a changegroup
106 while self.bundle(rev):
104 while self.bundle(rev):
107 if self._cache and self._cache[0] == iter_node:
105 if self._cache and self._cache[0] == iter_node:
108 text = self._cache[2]
106 text = self._cache[2]
109 break
107 break
110 chain.append(rev)
108 chain.append(rev)
111 iter_node = self.bundlebase(rev)
109 iter_node = self.bundlebase(rev)
112 rev = self.rev(iter_node)
110 rev = self.rev(iter_node)
113 if text is None:
111 if text is None:
114 text = revlog.revlog.revision(self, iter_node)
112 text = revlog.revlog.revision(self, iter_node)
115
113
116 while chain:
114 while chain:
117 delta = self.chunk(chain.pop())
115 delta = self.chunk(chain.pop())
118 text = mdiff.patches(text, [delta])
116 text = mdiff.patches(text, [delta])
119
117
120 p1, p2 = self.parents(node)
118 p1, p2 = self.parents(node)
121 if node != revlog.hash(text, p1, p2):
119 if node != revlog.hash(text, p1, p2):
122 raise error.RevlogError(_("integrity check failed on %s:%d")
120 raise error.RevlogError(_("integrity check failed on %s:%d")
123 % (self.datafile, self.rev(node)))
121 % (self.datafile, self.rev(node)))
124
122
125 self._cache = (node, self.rev(node), text)
123 self._cache = (node, self.rev(node), text)
126 return text
124 return text
127
125
128 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
126 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
129 raise NotImplementedError
127 raise NotImplementedError
130 def addgroup(self, revs, linkmapper, transaction):
128 def addgroup(self, revs, linkmapper, transaction):
131 raise NotImplementedError
129 raise NotImplementedError
132 def strip(self, rev, minlink):
130 def strip(self, rev, minlink):
133 raise NotImplementedError
131 raise NotImplementedError
134 def checksize(self):
132 def checksize(self):
135 raise NotImplementedError
133 raise NotImplementedError
136
134
137 class bundlechangelog(bundlerevlog, changelog.changelog):
135 class bundlechangelog(bundlerevlog, changelog.changelog):
138 def __init__(self, opener, bundlefile):
136 def __init__(self, opener, bundlefile):
139 changelog.changelog.__init__(self, opener)
137 changelog.changelog.__init__(self, opener)
140 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
138 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
141
139
142 class bundlemanifest(bundlerevlog, manifest.manifest):
140 class bundlemanifest(bundlerevlog, manifest.manifest):
143 def __init__(self, opener, bundlefile, linkmapper):
141 def __init__(self, opener, bundlefile, linkmapper):
144 manifest.manifest.__init__(self, opener)
142 manifest.manifest.__init__(self, opener)
145 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
143 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
146 linkmapper)
144 linkmapper)
147
145
148 class bundlefilelog(bundlerevlog, filelog.filelog):
146 class bundlefilelog(bundlerevlog, filelog.filelog):
149 def __init__(self, opener, path, bundlefile, linkmapper):
147 def __init__(self, opener, path, bundlefile, linkmapper):
150 filelog.filelog.__init__(self, opener, path)
148 filelog.filelog.__init__(self, opener, path)
151 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
149 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
152 linkmapper)
150 linkmapper)
153
151
154 class bundlerepository(localrepo.localrepository):
152 class bundlerepository(localrepo.localrepository):
155 def __init__(self, ui, path, bundlename):
153 def __init__(self, ui, path, bundlename):
156 self._tempparent = None
154 self._tempparent = None
157 try:
155 try:
158 localrepo.localrepository.__init__(self, ui, path)
156 localrepo.localrepository.__init__(self, ui, path)
159 except error.RepoError:
157 except error.RepoError:
160 self._tempparent = tempfile.mkdtemp()
158 self._tempparent = tempfile.mkdtemp()
161 localrepo.instance(ui,self._tempparent,1)
159 localrepo.instance(ui,self._tempparent,1)
162 localrepo.localrepository.__init__(self, ui, self._tempparent)
160 localrepo.localrepository.__init__(self, ui, self._tempparent)
163
161
164 if path:
162 if path:
165 self._url = 'bundle:' + path + '+' + bundlename
163 self._url = 'bundle:' + path + '+' + bundlename
166 else:
164 else:
167 self._url = 'bundle:' + bundlename
165 self._url = 'bundle:' + bundlename
168
166
169 self.tempfile = None
167 self.tempfile = None
170 self.bundlefile = open(bundlename, "rb")
168 self.bundlefile = open(bundlename, "rb")
171 header = self.bundlefile.read(6)
169 header = self.bundlefile.read(6)
172 if not header.startswith("HG"):
170 if not header.startswith("HG"):
173 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
171 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
174 elif not header.startswith("HG10"):
172 elif not header.startswith("HG10"):
175 raise util.Abort(_("%s: unknown bundle version") % bundlename)
173 raise util.Abort(_("%s: unknown bundle version") % bundlename)
176 elif (header == "HG10BZ") or (header == "HG10GZ"):
174 elif (header == "HG10BZ") or (header == "HG10GZ"):
177 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
175 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
178 suffix=".hg10un", dir=self.path)
176 suffix=".hg10un", dir=self.path)
179 self.tempfile = temp
177 self.tempfile = temp
180 fptemp = os.fdopen(fdtemp, 'wb')
178 fptemp = os.fdopen(fdtemp, 'wb')
181 def generator(f):
179 def generator(f):
182 if header == "HG10BZ":
180 if header == "HG10BZ":
183 zd = bz2.BZ2Decompressor()
181 zd = bz2.BZ2Decompressor()
184 zd.decompress("BZ")
182 zd.decompress("BZ")
185 elif header == "HG10GZ":
183 elif header == "HG10GZ":
186 zd = zlib.decompressobj()
184 zd = zlib.decompressobj()
187 for chunk in f:
185 for chunk in f:
188 yield zd.decompress(chunk)
186 yield zd.decompress(chunk)
189 gen = generator(util.filechunkiter(self.bundlefile, 4096))
187 gen = generator(util.filechunkiter(self.bundlefile, 4096))
190
188
191 try:
189 try:
192 fptemp.write("HG10UN")
190 fptemp.write("HG10UN")
193 for chunk in gen:
191 for chunk in gen:
194 fptemp.write(chunk)
192 fptemp.write(chunk)
195 finally:
193 finally:
196 fptemp.close()
194 fptemp.close()
197 self.bundlefile.close()
195 self.bundlefile.close()
198
196
199 self.bundlefile = open(self.tempfile, "rb")
197 self.bundlefile = open(self.tempfile, "rb")
200 # seek right after the header
198 # seek right after the header
201 self.bundlefile.seek(6)
199 self.bundlefile.seek(6)
202 elif header == "HG10UN":
200 elif header == "HG10UN":
203 # nothing to do
201 # nothing to do
204 pass
202 pass
205 else:
203 else:
206 raise util.Abort(_("%s: unknown bundle compression type")
204 raise util.Abort(_("%s: unknown bundle compression type")
207 % bundlename)
205 % bundlename)
208 # dict with the mapping 'filename' -> position in the bundle
206 # dict with the mapping 'filename' -> position in the bundle
209 self.bundlefilespos = {}
207 self.bundlefilespos = {}
210
208
211 def __getattr__(self, name):
209 def __getattr__(self, name):
212 if name == 'changelog':
210 if name == 'changelog':
213 self.changelog = bundlechangelog(self.sopener, self.bundlefile)
211 self.changelog = bundlechangelog(self.sopener, self.bundlefile)
214 self.manstart = self.bundlefile.tell()
212 self.manstart = self.bundlefile.tell()
215 return self.changelog
213 return self.changelog
216 elif name == 'manifest':
214 elif name == 'manifest':
217 self.bundlefile.seek(self.manstart)
215 self.bundlefile.seek(self.manstart)
218 self.manifest = bundlemanifest(self.sopener, self.bundlefile,
216 self.manifest = bundlemanifest(self.sopener, self.bundlefile,
219 self.changelog.rev)
217 self.changelog.rev)
220 self.filestart = self.bundlefile.tell()
218 self.filestart = self.bundlefile.tell()
221 return self.manifest
219 return self.manifest
222 elif name == 'manstart':
220 elif name == 'manstart':
223 self.changelog
221 self.changelog
224 return self.manstart
222 return self.manstart
225 elif name == 'filestart':
223 elif name == 'filestart':
226 self.manifest
224 self.manifest
227 return self.filestart
225 return self.filestart
228 else:
226 else:
229 return localrepo.localrepository.__getattr__(self, name)
227 return localrepo.localrepository.__getattr__(self, name)
230
228
231 def url(self):
229 def url(self):
232 return self._url
230 return self._url
233
231
234 def file(self, f):
232 def file(self, f):
235 if not self.bundlefilespos:
233 if not self.bundlefilespos:
236 self.bundlefile.seek(self.filestart)
234 self.bundlefile.seek(self.filestart)
237 while 1:
235 while 1:
238 chunk = changegroup.getchunk(self.bundlefile)
236 chunk = changegroup.getchunk(self.bundlefile)
239 if not chunk:
237 if not chunk:
240 break
238 break
241 self.bundlefilespos[chunk] = self.bundlefile.tell()
239 self.bundlefilespos[chunk] = self.bundlefile.tell()
242 for c in changegroup.chunkiter(self.bundlefile):
240 for c in changegroup.chunkiter(self.bundlefile):
243 pass
241 pass
244
242
245 if f[0] == '/':
243 if f[0] == '/':
246 f = f[1:]
244 f = f[1:]
247 if f in self.bundlefilespos:
245 if f in self.bundlefilespos:
248 self.bundlefile.seek(self.bundlefilespos[f])
246 self.bundlefile.seek(self.bundlefilespos[f])
249 return bundlefilelog(self.sopener, f, self.bundlefile,
247 return bundlefilelog(self.sopener, f, self.bundlefile,
250 self.changelog.rev)
248 self.changelog.rev)
251 else:
249 else:
252 return filelog.filelog(self.sopener, f)
250 return filelog.filelog(self.sopener, f)
253
251
254 def close(self):
252 def close(self):
255 """Close assigned bundle file immediately."""
253 """Close assigned bundle file immediately."""
256 self.bundlefile.close()
254 self.bundlefile.close()
257
255
258 def __del__(self):
256 def __del__(self):
259 bundlefile = getattr(self, 'bundlefile', None)
257 bundlefile = getattr(self, 'bundlefile', None)
260 if bundlefile and not bundlefile.closed:
258 if bundlefile and not bundlefile.closed:
261 bundlefile.close()
259 bundlefile.close()
262 tempfile = getattr(self, 'tempfile', None)
260 tempfile = getattr(self, 'tempfile', None)
263 if tempfile is not None:
261 if tempfile is not None:
264 os.unlink(tempfile)
262 os.unlink(tempfile)
265 if self._tempparent:
263 if self._tempparent:
266 shutil.rmtree(self._tempparent, True)
264 shutil.rmtree(self._tempparent, True)
267
265
268 def cancopy(self):
266 def cancopy(self):
269 return False
267 return False
270
268
271 def getcwd(self):
269 def getcwd(self):
272 return os.getcwd() # always outside the repo
270 return os.getcwd() # always outside the repo
273
271
274 def instance(ui, path, create):
272 def instance(ui, path, create):
275 if create:
273 if create:
276 raise util.Abort(_('cannot create new bundle repository'))
274 raise util.Abort(_('cannot create new bundle repository'))
277 parentpath = ui.config("bundle", "mainreporoot", "")
275 parentpath = ui.config("bundle", "mainreporoot", "")
278 if parentpath:
276 if parentpath:
279 # Try to make the full path relative so we get a nice, short URL.
277 # Try to make the full path relative so we get a nice, short URL.
280 # In particular, we don't want temp dir names in test outputs.
278 # In particular, we don't want temp dir names in test outputs.
281 cwd = os.getcwd()
279 cwd = os.getcwd()
282 if parentpath == cwd:
280 if parentpath == cwd:
283 parentpath = ''
281 parentpath = ''
284 else:
282 else:
285 cwd = os.path.join(cwd,'')
283 cwd = os.path.join(cwd,'')
286 if parentpath.startswith(cwd):
284 if parentpath.startswith(cwd):
287 parentpath = parentpath[len(cwd):]
285 parentpath = parentpath[len(cwd):]
288 path = util.drop_scheme('file', path)
286 path = util.drop_scheme('file', path)
289 if path.startswith('bundle:'):
287 if path.startswith('bundle:'):
290 path = util.drop_scheme('bundle', path)
288 path = util.drop_scheme('bundle', path)
291 s = path.split("+", 1)
289 s = path.split("+", 1)
292 if len(s) == 1:
290 if len(s) == 1:
293 repopath, bundlename = parentpath, s[0]
291 repopath, bundlename = parentpath, s[0]
294 else:
292 else:
295 repopath, bundlename = s
293 repopath, bundlename = s
296 else:
294 else:
297 repopath, bundlename = parentpath, path
295 repopath, bundlename = parentpath, path
298 return bundlerepository(ui, repopath, bundlename)
296 return bundlerepository(ui, repopath, bundlename)
@@ -1,141 +1,139 b''
1 """
1 # changegroup.py - Mercurial changegroup manipulation functions
2 changegroup.py - Mercurial changegroup manipulation functions
2 #
3
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
5
5 # This software may be used and distributed according to the terms of the
6 This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7 GNU General Public License version 2, incorporated herein by reference.
8 """
9
7
10 from i18n import _
8 from i18n import _
11 import struct, os, bz2, zlib, util, tempfile
9 import struct, os, bz2, zlib, util, tempfile
12
10
13 def getchunk(source):
11 def getchunk(source):
14 """get a chunk from a changegroup"""
12 """get a chunk from a changegroup"""
15 d = source.read(4)
13 d = source.read(4)
16 if not d:
14 if not d:
17 return ""
15 return ""
18 l = struct.unpack(">l", d)[0]
16 l = struct.unpack(">l", d)[0]
19 if l <= 4:
17 if l <= 4:
20 return ""
18 return ""
21 d = source.read(l - 4)
19 d = source.read(l - 4)
22 if len(d) < l - 4:
20 if len(d) < l - 4:
23 raise util.Abort(_("premature EOF reading chunk"
21 raise util.Abort(_("premature EOF reading chunk"
24 " (got %d bytes, expected %d)")
22 " (got %d bytes, expected %d)")
25 % (len(d), l - 4))
23 % (len(d), l - 4))
26 return d
24 return d
27
25
28 def chunkiter(source):
26 def chunkiter(source):
29 """iterate through the chunks in source"""
27 """iterate through the chunks in source"""
30 while 1:
28 while 1:
31 c = getchunk(source)
29 c = getchunk(source)
32 if not c:
30 if not c:
33 break
31 break
34 yield c
32 yield c
35
33
36 def chunkheader(length):
34 def chunkheader(length):
37 """build a changegroup chunk header"""
35 """build a changegroup chunk header"""
38 return struct.pack(">l", length + 4)
36 return struct.pack(">l", length + 4)
39
37
40 def closechunk():
38 def closechunk():
41 return struct.pack(">l", 0)
39 return struct.pack(">l", 0)
42
40
43 class nocompress(object):
41 class nocompress(object):
44 def compress(self, x):
42 def compress(self, x):
45 return x
43 return x
46 def flush(self):
44 def flush(self):
47 return ""
45 return ""
48
46
49 bundletypes = {
47 bundletypes = {
50 "": ("", nocompress),
48 "": ("", nocompress),
51 "HG10UN": ("HG10UN", nocompress),
49 "HG10UN": ("HG10UN", nocompress),
52 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
50 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
53 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
51 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
54 }
52 }
55
53
56 # hgweb uses this list to communicate it's preferred type
54 # hgweb uses this list to communicate it's preferred type
57 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
55 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
58
56
59 def writebundle(cg, filename, bundletype):
57 def writebundle(cg, filename, bundletype):
60 """Write a bundle file and return its filename.
58 """Write a bundle file and return its filename.
61
59
62 Existing files will not be overwritten.
60 Existing files will not be overwritten.
63 If no filename is specified, a temporary file is created.
61 If no filename is specified, a temporary file is created.
64 bz2 compression can be turned off.
62 bz2 compression can be turned off.
65 The bundle file will be deleted in case of errors.
63 The bundle file will be deleted in case of errors.
66 """
64 """
67
65
68 fh = None
66 fh = None
69 cleanup = None
67 cleanup = None
70 try:
68 try:
71 if filename:
69 if filename:
72 fh = open(filename, "wb")
70 fh = open(filename, "wb")
73 else:
71 else:
74 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
72 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
75 fh = os.fdopen(fd, "wb")
73 fh = os.fdopen(fd, "wb")
76 cleanup = filename
74 cleanup = filename
77
75
78 header, compressor = bundletypes[bundletype]
76 header, compressor = bundletypes[bundletype]
79 fh.write(header)
77 fh.write(header)
80 z = compressor()
78 z = compressor()
81
79
82 # parse the changegroup data, otherwise we will block
80 # parse the changegroup data, otherwise we will block
83 # in case of sshrepo because we don't know the end of the stream
81 # in case of sshrepo because we don't know the end of the stream
84
82
85 # an empty chunkiter is the end of the changegroup
83 # an empty chunkiter is the end of the changegroup
86 # a changegroup has at least 2 chunkiters (changelog and manifest).
84 # a changegroup has at least 2 chunkiters (changelog and manifest).
87 # after that, an empty chunkiter is the end of the changegroup
85 # after that, an empty chunkiter is the end of the changegroup
88 empty = False
86 empty = False
89 count = 0
87 count = 0
90 while not empty or count <= 2:
88 while not empty or count <= 2:
91 empty = True
89 empty = True
92 count += 1
90 count += 1
93 for chunk in chunkiter(cg):
91 for chunk in chunkiter(cg):
94 empty = False
92 empty = False
95 fh.write(z.compress(chunkheader(len(chunk))))
93 fh.write(z.compress(chunkheader(len(chunk))))
96 pos = 0
94 pos = 0
97 while pos < len(chunk):
95 while pos < len(chunk):
98 next = pos + 2**20
96 next = pos + 2**20
99 fh.write(z.compress(chunk[pos:next]))
97 fh.write(z.compress(chunk[pos:next]))
100 pos = next
98 pos = next
101 fh.write(z.compress(closechunk()))
99 fh.write(z.compress(closechunk()))
102 fh.write(z.flush())
100 fh.write(z.flush())
103 cleanup = None
101 cleanup = None
104 return filename
102 return filename
105 finally:
103 finally:
106 if fh is not None:
104 if fh is not None:
107 fh.close()
105 fh.close()
108 if cleanup is not None:
106 if cleanup is not None:
109 os.unlink(cleanup)
107 os.unlink(cleanup)
110
108
111 def unbundle(header, fh):
109 def unbundle(header, fh):
112 if header == 'HG10UN':
110 if header == 'HG10UN':
113 return fh
111 return fh
114 elif not header.startswith('HG'):
112 elif not header.startswith('HG'):
115 # old client with uncompressed bundle
113 # old client with uncompressed bundle
116 def generator(f):
114 def generator(f):
117 yield header
115 yield header
118 for chunk in f:
116 for chunk in f:
119 yield chunk
117 yield chunk
120 elif header == 'HG10GZ':
118 elif header == 'HG10GZ':
121 def generator(f):
119 def generator(f):
122 zd = zlib.decompressobj()
120 zd = zlib.decompressobj()
123 for chunk in f:
121 for chunk in f:
124 yield zd.decompress(chunk)
122 yield zd.decompress(chunk)
125 elif header == 'HG10BZ':
123 elif header == 'HG10BZ':
126 def generator(f):
124 def generator(f):
127 zd = bz2.BZ2Decompressor()
125 zd = bz2.BZ2Decompressor()
128 zd.decompress("BZ")
126 zd.decompress("BZ")
129 for chunk in util.filechunkiter(f, 4096):
127 for chunk in util.filechunkiter(f, 4096):
130 yield zd.decompress(chunk)
128 yield zd.decompress(chunk)
131 return util.chunkbuffer(generator(fh))
129 return util.chunkbuffer(generator(fh))
132
130
133 def readbundle(fh, fname):
131 def readbundle(fh, fname):
134 header = fh.read(6)
132 header = fh.read(6)
135 if not header.startswith('HG'):
133 if not header.startswith('HG'):
136 raise util.Abort(_('%s: not a Mercurial bundle file') % fname)
134 raise util.Abort(_('%s: not a Mercurial bundle file') % fname)
137 if not header.startswith('HG10'):
135 if not header.startswith('HG10'):
138 raise util.Abort(_('%s: unknown bundle version') % fname)
136 raise util.Abort(_('%s: unknown bundle version') % fname)
139 elif header not in bundletypes:
137 elif header not in bundletypes:
140 raise util.Abort(_('%s: unknown bundle compression type') % fname)
138 raise util.Abort(_('%s: unknown bundle compression type') % fname)
141 return unbundle(header, fh)
139 return unbundle(header, fh)
@@ -1,585 +1,583 b''
1 """
1 # dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
2 #
3
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5
5 # This software may be used and distributed according to the terms of the
6 This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7 GNU General Public License version 2, incorporated herein by reference.
8 """
9
7
10 from node import nullid
8 from node import nullid
11 from i18n import _
9 from i18n import _
12 import struct, os, stat, util, errno, ignore
10 import struct, os, stat, util, errno, ignore
13 import cStringIO, osutil, sys, parsers
11 import cStringIO, osutil, sys, parsers
14
12
15 _unknown = ('?', 0, 0, 0)
13 _unknown = ('?', 0, 0, 0)
16 _format = ">cllll"
14 _format = ">cllll"
17
15
18 def _finddirs(path):
16 def _finddirs(path):
19 pos = path.rfind('/')
17 pos = path.rfind('/')
20 while pos != -1:
18 while pos != -1:
21 yield path[:pos]
19 yield path[:pos]
22 pos = path.rfind('/', 0, pos)
20 pos = path.rfind('/', 0, pos)
23
21
24 def _incdirs(dirs, path):
22 def _incdirs(dirs, path):
25 for base in _finddirs(path):
23 for base in _finddirs(path):
26 if base in dirs:
24 if base in dirs:
27 dirs[base] += 1
25 dirs[base] += 1
28 return
26 return
29 dirs[base] = 1
27 dirs[base] = 1
30
28
31 def _decdirs(dirs, path):
29 def _decdirs(dirs, path):
32 for base in _finddirs(path):
30 for base in _finddirs(path):
33 if dirs[base] > 1:
31 if dirs[base] > 1:
34 dirs[base] -= 1
32 dirs[base] -= 1
35 return
33 return
36 del dirs[base]
34 del dirs[base]
37
35
38 class dirstate(object):
36 class dirstate(object):
39
37
40 def __init__(self, opener, ui, root):
38 def __init__(self, opener, ui, root):
41 self._opener = opener
39 self._opener = opener
42 self._root = root
40 self._root = root
43 self._rootdir = os.path.join(root, '')
41 self._rootdir = os.path.join(root, '')
44 self._dirty = False
42 self._dirty = False
45 self._dirtypl = False
43 self._dirtypl = False
46 self._ui = ui
44 self._ui = ui
47
45
48 def __getattr__(self, name):
46 def __getattr__(self, name):
49 if name == '_map':
47 if name == '_map':
50 self._read()
48 self._read()
51 return self._map
49 return self._map
52 elif name == '_copymap':
50 elif name == '_copymap':
53 self._read()
51 self._read()
54 return self._copymap
52 return self._copymap
55 elif name == '_foldmap':
53 elif name == '_foldmap':
56 _foldmap = {}
54 _foldmap = {}
57 for name in self._map:
55 for name in self._map:
58 norm = os.path.normcase(name)
56 norm = os.path.normcase(name)
59 _foldmap[norm] = name
57 _foldmap[norm] = name
60 self._foldmap = _foldmap
58 self._foldmap = _foldmap
61 return self._foldmap
59 return self._foldmap
62 elif name == '_branch':
60 elif name == '_branch':
63 try:
61 try:
64 self._branch = (self._opener("branch").read().strip()
62 self._branch = (self._opener("branch").read().strip()
65 or "default")
63 or "default")
66 except IOError:
64 except IOError:
67 self._branch = "default"
65 self._branch = "default"
68 return self._branch
66 return self._branch
69 elif name == '_pl':
67 elif name == '_pl':
70 self._pl = [nullid, nullid]
68 self._pl = [nullid, nullid]
71 try:
69 try:
72 st = self._opener("dirstate").read(40)
70 st = self._opener("dirstate").read(40)
73 if len(st) == 40:
71 if len(st) == 40:
74 self._pl = st[:20], st[20:40]
72 self._pl = st[:20], st[20:40]
75 except IOError, err:
73 except IOError, err:
76 if err.errno != errno.ENOENT: raise
74 if err.errno != errno.ENOENT: raise
77 return self._pl
75 return self._pl
78 elif name == '_dirs':
76 elif name == '_dirs':
79 dirs = {}
77 dirs = {}
80 for f,s in self._map.iteritems():
78 for f,s in self._map.iteritems():
81 if s[0] != 'r':
79 if s[0] != 'r':
82 _incdirs(dirs, f)
80 _incdirs(dirs, f)
83 self._dirs = dirs
81 self._dirs = dirs
84 return self._dirs
82 return self._dirs
85 elif name == '_ignore':
83 elif name == '_ignore':
86 files = [self._join('.hgignore')]
84 files = [self._join('.hgignore')]
87 for name, path in self._ui.configitems("ui"):
85 for name, path in self._ui.configitems("ui"):
88 if name == 'ignore' or name.startswith('ignore.'):
86 if name == 'ignore' or name.startswith('ignore.'):
89 files.append(os.path.expanduser(path))
87 files.append(os.path.expanduser(path))
90 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
88 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
91 return self._ignore
89 return self._ignore
92 elif name == '_slash':
90 elif name == '_slash':
93 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
91 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
94 return self._slash
92 return self._slash
95 elif name == '_checklink':
93 elif name == '_checklink':
96 self._checklink = util.checklink(self._root)
94 self._checklink = util.checklink(self._root)
97 return self._checklink
95 return self._checklink
98 elif name == '_checkexec':
96 elif name == '_checkexec':
99 self._checkexec = util.checkexec(self._root)
97 self._checkexec = util.checkexec(self._root)
100 return self._checkexec
98 return self._checkexec
101 elif name == '_checkcase':
99 elif name == '_checkcase':
102 self._checkcase = not util.checkcase(self._join('.hg'))
100 self._checkcase = not util.checkcase(self._join('.hg'))
103 return self._checkcase
101 return self._checkcase
104 elif name == 'normalize':
102 elif name == 'normalize':
105 if self._checkcase:
103 if self._checkcase:
106 self.normalize = self._normalize
104 self.normalize = self._normalize
107 else:
105 else:
108 self.normalize = lambda x, y=False: x
106 self.normalize = lambda x, y=False: x
109 return self.normalize
107 return self.normalize
110 else:
108 else:
111 raise AttributeError(name)
109 raise AttributeError(name)
112
110
113 def _join(self, f):
111 def _join(self, f):
114 # much faster than os.path.join()
112 # much faster than os.path.join()
115 # it's safe because f is always a relative path
113 # it's safe because f is always a relative path
116 return self._rootdir + f
114 return self._rootdir + f
117
115
118 def flagfunc(self, fallback):
116 def flagfunc(self, fallback):
119 if self._checklink:
117 if self._checklink:
120 if self._checkexec:
118 if self._checkexec:
121 def f(x):
119 def f(x):
122 p = self._join(x)
120 p = self._join(x)
123 if os.path.islink(p):
121 if os.path.islink(p):
124 return 'l'
122 return 'l'
125 if util.is_exec(p):
123 if util.is_exec(p):
126 return 'x'
124 return 'x'
127 return ''
125 return ''
128 return f
126 return f
129 def f(x):
127 def f(x):
130 if os.path.islink(self._join(x)):
128 if os.path.islink(self._join(x)):
131 return 'l'
129 return 'l'
132 if 'x' in fallback(x):
130 if 'x' in fallback(x):
133 return 'x'
131 return 'x'
134 return ''
132 return ''
135 return f
133 return f
136 if self._checkexec:
134 if self._checkexec:
137 def f(x):
135 def f(x):
138 if 'l' in fallback(x):
136 if 'l' in fallback(x):
139 return 'l'
137 return 'l'
140 if util.is_exec(self._join(x)):
138 if util.is_exec(self._join(x)):
141 return 'x'
139 return 'x'
142 return ''
140 return ''
143 return f
141 return f
144 return fallback
142 return fallback
145
143
146 def getcwd(self):
144 def getcwd(self):
147 cwd = os.getcwd()
145 cwd = os.getcwd()
148 if cwd == self._root: return ''
146 if cwd == self._root: return ''
149 # self._root ends with a path separator if self._root is '/' or 'C:\'
147 # self._root ends with a path separator if self._root is '/' or 'C:\'
150 rootsep = self._root
148 rootsep = self._root
151 if not util.endswithsep(rootsep):
149 if not util.endswithsep(rootsep):
152 rootsep += os.sep
150 rootsep += os.sep
153 if cwd.startswith(rootsep):
151 if cwd.startswith(rootsep):
154 return cwd[len(rootsep):]
152 return cwd[len(rootsep):]
155 else:
153 else:
156 # we're outside the repo. return an absolute path.
154 # we're outside the repo. return an absolute path.
157 return cwd
155 return cwd
158
156
159 def pathto(self, f, cwd=None):
157 def pathto(self, f, cwd=None):
160 if cwd is None:
158 if cwd is None:
161 cwd = self.getcwd()
159 cwd = self.getcwd()
162 path = util.pathto(self._root, cwd, f)
160 path = util.pathto(self._root, cwd, f)
163 if self._slash:
161 if self._slash:
164 return util.normpath(path)
162 return util.normpath(path)
165 return path
163 return path
166
164
167 def __getitem__(self, key):
165 def __getitem__(self, key):
168 ''' current states:
166 ''' current states:
169 n normal
167 n normal
170 m needs merging
168 m needs merging
171 r marked for removal
169 r marked for removal
172 a marked for addition
170 a marked for addition
173 ? not tracked'''
171 ? not tracked'''
174 return self._map.get(key, ("?",))[0]
172 return self._map.get(key, ("?",))[0]
175
173
176 def __contains__(self, key):
174 def __contains__(self, key):
177 return key in self._map
175 return key in self._map
178
176
179 def __iter__(self):
177 def __iter__(self):
180 for x in sorted(self._map):
178 for x in sorted(self._map):
181 yield x
179 yield x
182
180
183 def parents(self):
181 def parents(self):
184 return self._pl
182 return self._pl
185
183
186 def branch(self):
184 def branch(self):
187 return self._branch
185 return self._branch
188
186
189 def setparents(self, p1, p2=nullid):
187 def setparents(self, p1, p2=nullid):
190 self._dirty = self._dirtypl = True
188 self._dirty = self._dirtypl = True
191 self._pl = p1, p2
189 self._pl = p1, p2
192
190
193 def setbranch(self, branch):
191 def setbranch(self, branch):
194 self._branch = branch
192 self._branch = branch
195 self._opener("branch", "w").write(branch + '\n')
193 self._opener("branch", "w").write(branch + '\n')
196
194
197 def _read(self):
195 def _read(self):
198 self._map = {}
196 self._map = {}
199 self._copymap = {}
197 self._copymap = {}
200 try:
198 try:
201 st = self._opener("dirstate").read()
199 st = self._opener("dirstate").read()
202 except IOError, err:
200 except IOError, err:
203 if err.errno != errno.ENOENT: raise
201 if err.errno != errno.ENOENT: raise
204 return
202 return
205 if not st:
203 if not st:
206 return
204 return
207
205
208 p = parsers.parse_dirstate(self._map, self._copymap, st)
206 p = parsers.parse_dirstate(self._map, self._copymap, st)
209 if not self._dirtypl:
207 if not self._dirtypl:
210 self._pl = p
208 self._pl = p
211
209
212 def invalidate(self):
210 def invalidate(self):
213 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
211 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
214 if a in self.__dict__:
212 if a in self.__dict__:
215 delattr(self, a)
213 delattr(self, a)
216 self._dirty = False
214 self._dirty = False
217
215
218 def copy(self, source, dest):
216 def copy(self, source, dest):
219 """Mark dest as a copy of source. Unmark dest if source is None.
217 """Mark dest as a copy of source. Unmark dest if source is None.
220 """
218 """
221 if source == dest:
219 if source == dest:
222 return
220 return
223 self._dirty = True
221 self._dirty = True
224 if source is not None:
222 if source is not None:
225 self._copymap[dest] = source
223 self._copymap[dest] = source
226 elif dest in self._copymap:
224 elif dest in self._copymap:
227 del self._copymap[dest]
225 del self._copymap[dest]
228
226
229 def copied(self, file):
227 def copied(self, file):
230 return self._copymap.get(file, None)
228 return self._copymap.get(file, None)
231
229
232 def copies(self):
230 def copies(self):
233 return self._copymap
231 return self._copymap
234
232
235 def _droppath(self, f):
233 def _droppath(self, f):
236 if self[f] not in "?r" and "_dirs" in self.__dict__:
234 if self[f] not in "?r" and "_dirs" in self.__dict__:
237 _decdirs(self._dirs, f)
235 _decdirs(self._dirs, f)
238
236
239 def _addpath(self, f, check=False):
237 def _addpath(self, f, check=False):
240 oldstate = self[f]
238 oldstate = self[f]
241 if check or oldstate == "r":
239 if check or oldstate == "r":
242 if '\r' in f or '\n' in f:
240 if '\r' in f or '\n' in f:
243 raise util.Abort(
241 raise util.Abort(
244 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
242 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
245 if f in self._dirs:
243 if f in self._dirs:
246 raise util.Abort(_('directory %r already in dirstate') % f)
244 raise util.Abort(_('directory %r already in dirstate') % f)
247 # shadows
245 # shadows
248 for d in _finddirs(f):
246 for d in _finddirs(f):
249 if d in self._dirs:
247 if d in self._dirs:
250 break
248 break
251 if d in self._map and self[d] != 'r':
249 if d in self._map and self[d] != 'r':
252 raise util.Abort(
250 raise util.Abort(
253 _('file %r in dirstate clashes with %r') % (d, f))
251 _('file %r in dirstate clashes with %r') % (d, f))
254 if oldstate in "?r" and "_dirs" in self.__dict__:
252 if oldstate in "?r" and "_dirs" in self.__dict__:
255 _incdirs(self._dirs, f)
253 _incdirs(self._dirs, f)
256
254
257 def normal(self, f):
255 def normal(self, f):
258 'mark a file normal and clean'
256 'mark a file normal and clean'
259 self._dirty = True
257 self._dirty = True
260 self._addpath(f)
258 self._addpath(f)
261 s = os.lstat(self._join(f))
259 s = os.lstat(self._join(f))
262 self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
260 self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
263 if f in self._copymap:
261 if f in self._copymap:
264 del self._copymap[f]
262 del self._copymap[f]
265
263
266 def normallookup(self, f):
264 def normallookup(self, f):
267 'mark a file normal, but possibly dirty'
265 'mark a file normal, but possibly dirty'
268 if self._pl[1] != nullid and f in self._map:
266 if self._pl[1] != nullid and f in self._map:
269 # if there is a merge going on and the file was either
267 # if there is a merge going on and the file was either
270 # in state 'm' or dirty before being removed, restore that state.
268 # in state 'm' or dirty before being removed, restore that state.
271 entry = self._map[f]
269 entry = self._map[f]
272 if entry[0] == 'r' and entry[2] in (-1, -2):
270 if entry[0] == 'r' and entry[2] in (-1, -2):
273 source = self._copymap.get(f)
271 source = self._copymap.get(f)
274 if entry[2] == -1:
272 if entry[2] == -1:
275 self.merge(f)
273 self.merge(f)
276 elif entry[2] == -2:
274 elif entry[2] == -2:
277 self.normaldirty(f)
275 self.normaldirty(f)
278 if source:
276 if source:
279 self.copy(source, f)
277 self.copy(source, f)
280 return
278 return
281 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
279 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
282 return
280 return
283 self._dirty = True
281 self._dirty = True
284 self._addpath(f)
282 self._addpath(f)
285 self._map[f] = ('n', 0, -1, -1)
283 self._map[f] = ('n', 0, -1, -1)
286 if f in self._copymap:
284 if f in self._copymap:
287 del self._copymap[f]
285 del self._copymap[f]
288
286
289 def normaldirty(self, f):
287 def normaldirty(self, f):
290 'mark a file normal, but dirty'
288 'mark a file normal, but dirty'
291 self._dirty = True
289 self._dirty = True
292 self._addpath(f)
290 self._addpath(f)
293 self._map[f] = ('n', 0, -2, -1)
291 self._map[f] = ('n', 0, -2, -1)
294 if f in self._copymap:
292 if f in self._copymap:
295 del self._copymap[f]
293 del self._copymap[f]
296
294
297 def add(self, f):
295 def add(self, f):
298 'mark a file added'
296 'mark a file added'
299 self._dirty = True
297 self._dirty = True
300 self._addpath(f, True)
298 self._addpath(f, True)
301 self._map[f] = ('a', 0, -1, -1)
299 self._map[f] = ('a', 0, -1, -1)
302 if f in self._copymap:
300 if f in self._copymap:
303 del self._copymap[f]
301 del self._copymap[f]
304
302
305 def remove(self, f):
303 def remove(self, f):
306 'mark a file removed'
304 'mark a file removed'
307 self._dirty = True
305 self._dirty = True
308 self._droppath(f)
306 self._droppath(f)
309 size = 0
307 size = 0
310 if self._pl[1] != nullid and f in self._map:
308 if self._pl[1] != nullid and f in self._map:
311 entry = self._map[f]
309 entry = self._map[f]
312 if entry[0] == 'm':
310 if entry[0] == 'm':
313 size = -1
311 size = -1
314 elif entry[0] == 'n' and entry[2] == -2:
312 elif entry[0] == 'n' and entry[2] == -2:
315 size = -2
313 size = -2
316 self._map[f] = ('r', 0, size, 0)
314 self._map[f] = ('r', 0, size, 0)
317 if size == 0 and f in self._copymap:
315 if size == 0 and f in self._copymap:
318 del self._copymap[f]
316 del self._copymap[f]
319
317
320 def merge(self, f):
318 def merge(self, f):
321 'mark a file merged'
319 'mark a file merged'
322 self._dirty = True
320 self._dirty = True
323 s = os.lstat(self._join(f))
321 s = os.lstat(self._join(f))
324 self._addpath(f)
322 self._addpath(f)
325 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
323 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
326 if f in self._copymap:
324 if f in self._copymap:
327 del self._copymap[f]
325 del self._copymap[f]
328
326
329 def forget(self, f):
327 def forget(self, f):
330 'forget a file'
328 'forget a file'
331 self._dirty = True
329 self._dirty = True
332 try:
330 try:
333 self._droppath(f)
331 self._droppath(f)
334 del self._map[f]
332 del self._map[f]
335 except KeyError:
333 except KeyError:
336 self._ui.warn(_("not in dirstate: %s\n") % f)
334 self._ui.warn(_("not in dirstate: %s\n") % f)
337
335
338 def _normalize(self, path, knownpath=False):
336 def _normalize(self, path, knownpath=False):
339 norm_path = os.path.normcase(path)
337 norm_path = os.path.normcase(path)
340 fold_path = self._foldmap.get(norm_path, None)
338 fold_path = self._foldmap.get(norm_path, None)
341 if fold_path is None:
339 if fold_path is None:
342 if knownpath or not os.path.exists(os.path.join(self._root, path)):
340 if knownpath or not os.path.exists(os.path.join(self._root, path)):
343 fold_path = path
341 fold_path = path
344 else:
342 else:
345 fold_path = self._foldmap.setdefault(norm_path,
343 fold_path = self._foldmap.setdefault(norm_path,
346 util.fspath(path, self._root))
344 util.fspath(path, self._root))
347 return fold_path
345 return fold_path
348
346
349 def clear(self):
347 def clear(self):
350 self._map = {}
348 self._map = {}
351 if "_dirs" in self.__dict__:
349 if "_dirs" in self.__dict__:
352 delattr(self, "_dirs");
350 delattr(self, "_dirs");
353 self._copymap = {}
351 self._copymap = {}
354 self._pl = [nullid, nullid]
352 self._pl = [nullid, nullid]
355 self._dirty = True
353 self._dirty = True
356
354
357 def rebuild(self, parent, files):
355 def rebuild(self, parent, files):
358 self.clear()
356 self.clear()
359 for f in files:
357 for f in files:
360 if 'x' in files.flags(f):
358 if 'x' in files.flags(f):
361 self._map[f] = ('n', 0777, -1, 0)
359 self._map[f] = ('n', 0777, -1, 0)
362 else:
360 else:
363 self._map[f] = ('n', 0666, -1, 0)
361 self._map[f] = ('n', 0666, -1, 0)
364 self._pl = (parent, nullid)
362 self._pl = (parent, nullid)
365 self._dirty = True
363 self._dirty = True
366
364
367 def write(self):
365 def write(self):
368 if not self._dirty:
366 if not self._dirty:
369 return
367 return
370 st = self._opener("dirstate", "w", atomictemp=True)
368 st = self._opener("dirstate", "w", atomictemp=True)
371
369
372 try:
370 try:
373 gran = int(self._ui.config('dirstate', 'granularity', 1))
371 gran = int(self._ui.config('dirstate', 'granularity', 1))
374 except ValueError:
372 except ValueError:
375 gran = 1
373 gran = 1
376 limit = sys.maxint
374 limit = sys.maxint
377 if gran > 0:
375 if gran > 0:
378 limit = util.fstat(st).st_mtime - gran
376 limit = util.fstat(st).st_mtime - gran
379
377
380 cs = cStringIO.StringIO()
378 cs = cStringIO.StringIO()
381 copymap = self._copymap
379 copymap = self._copymap
382 pack = struct.pack
380 pack = struct.pack
383 write = cs.write
381 write = cs.write
384 write("".join(self._pl))
382 write("".join(self._pl))
385 for f, e in self._map.iteritems():
383 for f, e in self._map.iteritems():
386 if f in copymap:
384 if f in copymap:
387 f = "%s\0%s" % (f, copymap[f])
385 f = "%s\0%s" % (f, copymap[f])
388 if e[3] > limit and e[0] == 'n':
386 if e[3] > limit and e[0] == 'n':
389 e = (e[0], 0, -1, -1)
387 e = (e[0], 0, -1, -1)
390 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
388 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
391 write(e)
389 write(e)
392 write(f)
390 write(f)
393 st.write(cs.getvalue())
391 st.write(cs.getvalue())
394 st.rename()
392 st.rename()
395 self._dirty = self._dirtypl = False
393 self._dirty = self._dirtypl = False
396
394
397 def _dirignore(self, f):
395 def _dirignore(self, f):
398 if f == '.':
396 if f == '.':
399 return False
397 return False
400 if self._ignore(f):
398 if self._ignore(f):
401 return True
399 return True
402 for p in _finddirs(f):
400 for p in _finddirs(f):
403 if self._ignore(p):
401 if self._ignore(p):
404 return True
402 return True
405 return False
403 return False
406
404
407 def walk(self, match, unknown, ignored):
405 def walk(self, match, unknown, ignored):
408 '''
406 '''
409 walk recursively through the directory tree, finding all files
407 walk recursively through the directory tree, finding all files
410 matched by the match function
408 matched by the match function
411
409
412 results are yielded in a tuple (filename, stat), where stat
410 results are yielded in a tuple (filename, stat), where stat
413 and st is the stat result if the file was found in the directory.
411 and st is the stat result if the file was found in the directory.
414 '''
412 '''
415
413
416 def fwarn(f, msg):
414 def fwarn(f, msg):
417 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
415 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
418 return False
416 return False
419 badfn = fwarn
417 badfn = fwarn
420 if hasattr(match, 'bad'):
418 if hasattr(match, 'bad'):
421 badfn = match.bad
419 badfn = match.bad
422
420
423 def badtype(f, mode):
421 def badtype(f, mode):
424 kind = 'unknown'
422 kind = 'unknown'
425 if stat.S_ISCHR(mode): kind = _('character device')
423 if stat.S_ISCHR(mode): kind = _('character device')
426 elif stat.S_ISBLK(mode): kind = _('block device')
424 elif stat.S_ISBLK(mode): kind = _('block device')
427 elif stat.S_ISFIFO(mode): kind = _('fifo')
425 elif stat.S_ISFIFO(mode): kind = _('fifo')
428 elif stat.S_ISSOCK(mode): kind = _('socket')
426 elif stat.S_ISSOCK(mode): kind = _('socket')
429 elif stat.S_ISDIR(mode): kind = _('directory')
427 elif stat.S_ISDIR(mode): kind = _('directory')
430 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
428 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
431 % (self.pathto(f), kind))
429 % (self.pathto(f), kind))
432
430
433 ignore = self._ignore
431 ignore = self._ignore
434 dirignore = self._dirignore
432 dirignore = self._dirignore
435 if ignored:
433 if ignored:
436 ignore = util.never
434 ignore = util.never
437 dirignore = util.never
435 dirignore = util.never
438 elif not unknown:
436 elif not unknown:
439 # if unknown and ignored are False, skip step 2
437 # if unknown and ignored are False, skip step 2
440 ignore = util.always
438 ignore = util.always
441 dirignore = util.always
439 dirignore = util.always
442
440
443 matchfn = match.matchfn
441 matchfn = match.matchfn
444 dmap = self._map
442 dmap = self._map
445 normpath = util.normpath
443 normpath = util.normpath
446 normalize = self.normalize
444 normalize = self.normalize
447 listdir = osutil.listdir
445 listdir = osutil.listdir
448 lstat = os.lstat
446 lstat = os.lstat
449 getkind = stat.S_IFMT
447 getkind = stat.S_IFMT
450 dirkind = stat.S_IFDIR
448 dirkind = stat.S_IFDIR
451 regkind = stat.S_IFREG
449 regkind = stat.S_IFREG
452 lnkkind = stat.S_IFLNK
450 lnkkind = stat.S_IFLNK
453 join = self._join
451 join = self._join
454 work = []
452 work = []
455 wadd = work.append
453 wadd = work.append
456
454
457 files = set(match.files())
455 files = set(match.files())
458 if not files or '.' in files:
456 if not files or '.' in files:
459 files = ['']
457 files = ['']
460 results = {'.hg': None}
458 results = {'.hg': None}
461
459
462 # step 1: find all explicit files
460 # step 1: find all explicit files
463 for ff in sorted(files):
461 for ff in sorted(files):
464 nf = normalize(normpath(ff))
462 nf = normalize(normpath(ff))
465 if nf in results:
463 if nf in results:
466 continue
464 continue
467
465
468 try:
466 try:
469 st = lstat(join(nf))
467 st = lstat(join(nf))
470 kind = getkind(st.st_mode)
468 kind = getkind(st.st_mode)
471 if kind == dirkind:
469 if kind == dirkind:
472 if not dirignore(nf):
470 if not dirignore(nf):
473 wadd(nf)
471 wadd(nf)
474 elif kind == regkind or kind == lnkkind:
472 elif kind == regkind or kind == lnkkind:
475 results[nf] = st
473 results[nf] = st
476 else:
474 else:
477 badtype(ff, kind)
475 badtype(ff, kind)
478 if nf in dmap:
476 if nf in dmap:
479 results[nf] = None
477 results[nf] = None
480 except OSError, inst:
478 except OSError, inst:
481 keep = False
479 keep = False
482 prefix = nf + "/"
480 prefix = nf + "/"
483 for fn in dmap:
481 for fn in dmap:
484 if nf == fn or fn.startswith(prefix):
482 if nf == fn or fn.startswith(prefix):
485 keep = True
483 keep = True
486 break
484 break
487 if not keep:
485 if not keep:
488 if inst.errno != errno.ENOENT:
486 if inst.errno != errno.ENOENT:
489 fwarn(ff, inst.strerror)
487 fwarn(ff, inst.strerror)
490 elif badfn(ff, inst.strerror):
488 elif badfn(ff, inst.strerror):
491 if (nf in dmap or not ignore(nf)) and matchfn(nf):
489 if (nf in dmap or not ignore(nf)) and matchfn(nf):
492 results[nf] = None
490 results[nf] = None
493
491
494 # step 2: visit subdirectories
492 # step 2: visit subdirectories
495 while work:
493 while work:
496 nd = work.pop()
494 nd = work.pop()
497 if hasattr(match, 'dir'):
495 if hasattr(match, 'dir'):
498 match.dir(nd)
496 match.dir(nd)
499 skip = None
497 skip = None
500 if nd == '.':
498 if nd == '.':
501 nd = ''
499 nd = ''
502 else:
500 else:
503 skip = '.hg'
501 skip = '.hg'
504 try:
502 try:
505 entries = listdir(join(nd), stat=True, skip=skip)
503 entries = listdir(join(nd), stat=True, skip=skip)
506 except OSError, inst:
504 except OSError, inst:
507 if inst.errno == errno.EACCES:
505 if inst.errno == errno.EACCES:
508 fwarn(nd, inst.strerror)
506 fwarn(nd, inst.strerror)
509 continue
507 continue
510 raise
508 raise
511 for f, kind, st in entries:
509 for f, kind, st in entries:
512 nf = normalize(nd and (nd + "/" + f) or f, True)
510 nf = normalize(nd and (nd + "/" + f) or f, True)
513 if nf not in results:
511 if nf not in results:
514 if kind == dirkind:
512 if kind == dirkind:
515 if not ignore(nf):
513 if not ignore(nf):
516 wadd(nf)
514 wadd(nf)
517 if nf in dmap and matchfn(nf):
515 if nf in dmap and matchfn(nf):
518 results[nf] = None
516 results[nf] = None
519 elif kind == regkind or kind == lnkkind:
517 elif kind == regkind or kind == lnkkind:
520 if nf in dmap:
518 if nf in dmap:
521 if matchfn(nf):
519 if matchfn(nf):
522 results[nf] = st
520 results[nf] = st
523 elif matchfn(nf) and not ignore(nf):
521 elif matchfn(nf) and not ignore(nf):
524 results[nf] = st
522 results[nf] = st
525 elif nf in dmap and matchfn(nf):
523 elif nf in dmap and matchfn(nf):
526 results[nf] = None
524 results[nf] = None
527
525
528 # step 3: report unseen items in the dmap hash
526 # step 3: report unseen items in the dmap hash
529 visit = sorted([f for f in dmap if f not in results and match(f)])
527 visit = sorted([f for f in dmap if f not in results and match(f)])
530 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
528 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
531 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
529 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
532 st = None
530 st = None
533 results[nf] = st
531 results[nf] = st
534
532
535 del results['.hg']
533 del results['.hg']
536 return results
534 return results
537
535
538 def status(self, match, ignored, clean, unknown):
536 def status(self, match, ignored, clean, unknown):
539 listignored, listclean, listunknown = ignored, clean, unknown
537 listignored, listclean, listunknown = ignored, clean, unknown
540 lookup, modified, added, unknown, ignored = [], [], [], [], []
538 lookup, modified, added, unknown, ignored = [], [], [], [], []
541 removed, deleted, clean = [], [], []
539 removed, deleted, clean = [], [], []
542
540
543 dmap = self._map
541 dmap = self._map
544 ladd = lookup.append
542 ladd = lookup.append
545 madd = modified.append
543 madd = modified.append
546 aadd = added.append
544 aadd = added.append
547 uadd = unknown.append
545 uadd = unknown.append
548 iadd = ignored.append
546 iadd = ignored.append
549 radd = removed.append
547 radd = removed.append
550 dadd = deleted.append
548 dadd = deleted.append
551 cadd = clean.append
549 cadd = clean.append
552
550
553 for fn, st in self.walk(match, listunknown, listignored).iteritems():
551 for fn, st in self.walk(match, listunknown, listignored).iteritems():
554 if fn not in dmap:
552 if fn not in dmap:
555 if (listignored or match.exact(fn)) and self._dirignore(fn):
553 if (listignored or match.exact(fn)) and self._dirignore(fn):
556 if listignored:
554 if listignored:
557 iadd(fn)
555 iadd(fn)
558 elif listunknown:
556 elif listunknown:
559 uadd(fn)
557 uadd(fn)
560 continue
558 continue
561
559
562 state, mode, size, time = dmap[fn]
560 state, mode, size, time = dmap[fn]
563
561
564 if not st and state in "nma":
562 if not st and state in "nma":
565 dadd(fn)
563 dadd(fn)
566 elif state == 'n':
564 elif state == 'n':
567 if (size >= 0 and
565 if (size >= 0 and
568 (size != st.st_size
566 (size != st.st_size
569 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
567 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
570 or size == -2
568 or size == -2
571 or fn in self._copymap):
569 or fn in self._copymap):
572 madd(fn)
570 madd(fn)
573 elif time != int(st.st_mtime):
571 elif time != int(st.st_mtime):
574 ladd(fn)
572 ladd(fn)
575 elif listclean:
573 elif listclean:
576 cadd(fn)
574 cadd(fn)
577 elif state == 'm':
575 elif state == 'm':
578 madd(fn)
576 madd(fn)
579 elif state == 'a':
577 elif state == 'a':
580 aadd(fn)
578 aadd(fn)
581 elif state == 'r':
579 elif state == 'r':
582 radd(fn)
580 radd(fn)
583
581
584 return (lookup, modified, added, removed, deleted, unknown, ignored,
582 return (lookup, modified, added, removed, deleted, unknown, ignored,
585 clean)
583 clean)
@@ -1,76 +1,74 b''
1 """
1 # encoding.py - character transcoding support for Mercurial
2 encoding.py - character transcoding support for Mercurial
2 #
3
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
5
5 # This software may be used and distributed according to the terms of the
6 This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7 GNU General Public License version 2, incorporated herein by reference.
8 """
9
7
10 import sys, unicodedata, locale, os, error
8 import sys, unicodedata, locale, os, error
11
9
12 _encodingfixup = {'646': 'ascii', 'ANSI_X3.4-1968': 'ascii'}
10 _encodingfixup = {'646': 'ascii', 'ANSI_X3.4-1968': 'ascii'}
13
11
14 try:
12 try:
15 encoding = os.environ.get("HGENCODING")
13 encoding = os.environ.get("HGENCODING")
16 if sys.platform == 'darwin' and not encoding:
14 if sys.platform == 'darwin' and not encoding:
17 # On darwin, getpreferredencoding ignores the locale environment and
15 # On darwin, getpreferredencoding ignores the locale environment and
18 # always returns mac-roman. We override this if the environment is
16 # always returns mac-roman. We override this if the environment is
19 # not C (has been customized by the user).
17 # not C (has been customized by the user).
20 locale.setlocale(locale.LC_CTYPE, '')
18 locale.setlocale(locale.LC_CTYPE, '')
21 encoding = locale.getlocale()[1]
19 encoding = locale.getlocale()[1]
22 if not encoding:
20 if not encoding:
23 encoding = locale.getpreferredencoding() or 'ascii'
21 encoding = locale.getpreferredencoding() or 'ascii'
24 encoding = _encodingfixup.get(encoding, encoding)
22 encoding = _encodingfixup.get(encoding, encoding)
25 except locale.Error:
23 except locale.Error:
26 encoding = 'ascii'
24 encoding = 'ascii'
27 encodingmode = os.environ.get("HGENCODINGMODE", "strict")
25 encodingmode = os.environ.get("HGENCODINGMODE", "strict")
28 fallbackencoding = 'ISO-8859-1'
26 fallbackencoding = 'ISO-8859-1'
29
27
30 def tolocal(s):
28 def tolocal(s):
31 """
29 """
32 Convert a string from internal UTF-8 to local encoding
30 Convert a string from internal UTF-8 to local encoding
33
31
34 All internal strings should be UTF-8 but some repos before the
32 All internal strings should be UTF-8 but some repos before the
35 implementation of locale support may contain latin1 or possibly
33 implementation of locale support may contain latin1 or possibly
36 other character sets. We attempt to decode everything strictly
34 other character sets. We attempt to decode everything strictly
37 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
35 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
38 replace unknown characters.
36 replace unknown characters.
39 """
37 """
40 for e in ('UTF-8', fallbackencoding):
38 for e in ('UTF-8', fallbackencoding):
41 try:
39 try:
42 u = s.decode(e) # attempt strict decoding
40 u = s.decode(e) # attempt strict decoding
43 return u.encode(encoding, "replace")
41 return u.encode(encoding, "replace")
44 except LookupError, k:
42 except LookupError, k:
45 raise error.Abort("%s, please check your locale settings" % k)
43 raise error.Abort("%s, please check your locale settings" % k)
46 except UnicodeDecodeError:
44 except UnicodeDecodeError:
47 pass
45 pass
48 u = s.decode("utf-8", "replace") # last ditch
46 u = s.decode("utf-8", "replace") # last ditch
49 return u.encode(encoding, "replace")
47 return u.encode(encoding, "replace")
50
48
51 def fromlocal(s):
49 def fromlocal(s):
52 """
50 """
53 Convert a string from the local character encoding to UTF-8
51 Convert a string from the local character encoding to UTF-8
54
52
55 We attempt to decode strings using the encoding mode set by
53 We attempt to decode strings using the encoding mode set by
56 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
54 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
57 characters will cause an error message. Other modes include
55 characters will cause an error message. Other modes include
58 'replace', which replaces unknown characters with a special
56 'replace', which replaces unknown characters with a special
59 Unicode character, and 'ignore', which drops the character.
57 Unicode character, and 'ignore', which drops the character.
60 """
58 """
61 try:
59 try:
62 return s.decode(encoding, encodingmode).encode("utf-8")
60 return s.decode(encoding, encodingmode).encode("utf-8")
63 except UnicodeDecodeError, inst:
61 except UnicodeDecodeError, inst:
64 sub = s[max(0, inst.start-10):inst.start+10]
62 sub = s[max(0, inst.start-10):inst.start+10]
65 raise error.Abort("decoding near '%s': %s!" % (sub, inst))
63 raise error.Abort("decoding near '%s': %s!" % (sub, inst))
66 except LookupError, k:
64 except LookupError, k:
67 raise error.Abort("%s, please check your locale settings" % k)
65 raise error.Abort("%s, please check your locale settings" % k)
68
66
69 def colwidth(s):
67 def colwidth(s):
70 "Find the column width of a UTF-8 string for display"
68 "Find the column width of a UTF-8 string for display"
71 d = s.decode(encoding, 'replace')
69 d = s.decode(encoding, 'replace')
72 if hasattr(unicodedata, 'east_asian_width'):
70 if hasattr(unicodedata, 'east_asian_width'):
73 w = unicodedata.east_asian_width
71 w = unicodedata.east_asian_width
74 return sum([w(c) in 'WF' and 2 or 1 for c in d])
72 return sum([w(c) in 'WF' and 2 or 1 for c in d])
75 return len(d)
73 return len(d)
76
74
@@ -1,70 +1,68 b''
1 """
1 # error.py - Mercurial exceptions
2 error.py - Mercurial exceptions
2 #
3
3 # This allows us to catch exceptions at higher levels without forcing imports
4 This allows us to catch exceptions at higher levels without forcing imports
4 #
5
5 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
6 Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
6 #
7
7 # This software may be used and distributed according to the terms of the
8 This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2, incorporated herein by reference.
9 GNU General Public License version 2, incorporated herein by reference.
10 """
11
9
12 # Do not import anything here, please
10 # Do not import anything here, please
13
11
14 class RevlogError(Exception):
12 class RevlogError(Exception):
15 pass
13 pass
16
14
17 class LookupError(RevlogError, KeyError):
15 class LookupError(RevlogError, KeyError):
18 def __init__(self, name, index, message):
16 def __init__(self, name, index, message):
19 self.name = name
17 self.name = name
20 if isinstance(name, str) and len(name) == 20:
18 if isinstance(name, str) and len(name) == 20:
21 from node import short
19 from node import short
22 name = short(name)
20 name = short(name)
23 RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
21 RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
24
22
25 def __str__(self):
23 def __str__(self):
26 return RevlogError.__str__(self)
24 return RevlogError.__str__(self)
27
25
28 class ParseError(Exception):
26 class ParseError(Exception):
29 """Exception raised on errors in parsing the command line."""
27 """Exception raised on errors in parsing the command line."""
30
28
31 class ConfigError(Exception):
29 class ConfigError(Exception):
32 'Exception raised when parsing config files'
30 'Exception raised when parsing config files'
33
31
34 class RepoError(Exception):
32 class RepoError(Exception):
35 pass
33 pass
36
34
37 class CapabilityError(RepoError):
35 class CapabilityError(RepoError):
38 pass
36 pass
39
37
40 class LockError(IOError):
38 class LockError(IOError):
41 def __init__(self, errno, strerror, filename, desc):
39 def __init__(self, errno, strerror, filename, desc):
42 IOError.__init__(self, errno, strerror, filename)
40 IOError.__init__(self, errno, strerror, filename)
43 self.desc = desc
41 self.desc = desc
44
42
45 class LockHeld(LockError):
43 class LockHeld(LockError):
46 def __init__(self, errno, filename, desc, locker):
44 def __init__(self, errno, filename, desc, locker):
47 LockError.__init__(self, errno, 'Lock held', filename, desc)
45 LockError.__init__(self, errno, 'Lock held', filename, desc)
48 self.locker = locker
46 self.locker = locker
49
47
50 class LockUnavailable(LockError):
48 class LockUnavailable(LockError):
51 pass
49 pass
52
50
53 class ResponseError(Exception):
51 class ResponseError(Exception):
54 """Raised to print an error with part of output and exit."""
52 """Raised to print an error with part of output and exit."""
55
53
56 class UnknownCommand(Exception):
54 class UnknownCommand(Exception):
57 """Exception raised if command is not in the command table."""
55 """Exception raised if command is not in the command table."""
58
56
59 class AmbiguousCommand(Exception):
57 class AmbiguousCommand(Exception):
60 """Exception raised if command shortcut matches more than one command."""
58 """Exception raised if command shortcut matches more than one command."""
61
59
62 # derived from KeyboardInterrupt to simplify some breakout code
60 # derived from KeyboardInterrupt to simplify some breakout code
63 class SignalInterrupt(KeyboardInterrupt):
61 class SignalInterrupt(KeyboardInterrupt):
64 """Exception raised on SIGTERM and SIGHUP."""
62 """Exception raised on SIGTERM and SIGHUP."""
65
63
66 class SignatureError(Exception):
64 class SignatureError(Exception):
67 pass
65 pass
68
66
69 class Abort(Exception):
67 class Abort(Exception):
70 """Raised if a command needs to print an error and exit."""
68 """Raised if a command needs to print an error and exit."""
@@ -1,49 +1,47 b''
1 """
1 # i18n.py - internationalization support for mercurial
2 i18n.py - internationalization support for mercurial
2 #
3
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
5
5 # This software may be used and distributed according to the terms of the
6 This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7 GNU General Public License version 2, incorporated herein by reference.
8 """
9
7
10 import gettext, sys, os, encoding
8 import gettext, sys, os, encoding
11
9
12 # modelled after templater.templatepath:
10 # modelled after templater.templatepath:
13 if hasattr(sys, 'frozen'):
11 if hasattr(sys, 'frozen'):
14 module = sys.executable
12 module = sys.executable
15 else:
13 else:
16 module = __file__
14 module = __file__
17
15
18 base = os.path.dirname(module)
16 base = os.path.dirname(module)
19 for dir in ('.', '..'):
17 for dir in ('.', '..'):
20 localedir = os.path.normpath(os.path.join(base, dir, 'locale'))
18 localedir = os.path.normpath(os.path.join(base, dir, 'locale'))
21 if os.path.isdir(localedir):
19 if os.path.isdir(localedir):
22 break
20 break
23
21
24 t = gettext.translation('hg', localedir, fallback=True)
22 t = gettext.translation('hg', localedir, fallback=True)
25
23
26 def gettext(message):
24 def gettext(message):
27 """Translate message.
25 """Translate message.
28
26
29 The message is looked up in the catalog to get a Unicode string,
27 The message is looked up in the catalog to get a Unicode string,
30 which is encoded in the local encoding before being returned.
28 which is encoded in the local encoding before being returned.
31
29
32 Important: message is restricted to characters in the encoding
30 Important: message is restricted to characters in the encoding
33 given by sys.getdefaultencoding() which is most likely 'ascii'.
31 given by sys.getdefaultencoding() which is most likely 'ascii'.
34 """
32 """
35 # If message is None, t.ugettext will return u'None' as the
33 # If message is None, t.ugettext will return u'None' as the
36 # translation whereas our callers expect us to return None.
34 # translation whereas our callers expect us to return None.
37 if message is None:
35 if message is None:
38 return message
36 return message
39
37
40 # We cannot just run the text through encoding.tolocal since that
38 # We cannot just run the text through encoding.tolocal since that
41 # leads to infinite recursion when encoding._encoding is invalid.
39 # leads to infinite recursion when encoding._encoding is invalid.
42 try:
40 try:
43 u = t.ugettext(message)
41 u = t.ugettext(message)
44 return u.encode(encoding.encoding, "replace")
42 return u.encode(encoding.encoding, "replace")
45 except LookupError:
43 except LookupError:
46 return message
44 return message
47
45
48 _ = gettext
46 _ = gettext
49
47
@@ -1,20 +1,18 b''
1 """
1 # node.py - basic nodeid manipulation for mercurial
2 node.py - basic nodeid manipulation for mercurial
2 #
3
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
5
5 # This software may be used and distributed according to the terms of the
6 This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7 GNU General Public License version 2, incorporated herein by reference.
8 """
9
7
10 import binascii
8 import binascii
11
9
12 nullrev = -1
10 nullrev = -1
13 nullid = "\0" * 20
11 nullid = "\0" * 20
14
12
15 # This ugly style has a noticeable effect in manifest parsing
13 # This ugly style has a noticeable effect in manifest parsing
16 hex = binascii.hexlify
14 hex = binascii.hexlify
17 bin = binascii.unhexlify
15 bin = binascii.unhexlify
18
16
19 def short(node):
17 def short(node):
20 return hex(node[:6])
18 return hex(node[:6])
@@ -1,219 +1,217 b''
1 """
1 # posix.py - Posix utility function implementations for Mercurial
2 posix.py - Posix utility function implementations for Mercurial
2 #
3
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
5
5 # This software may be used and distributed according to the terms of the
6 This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7 GNU General Public License version 2, incorporated herein by reference.
8 """
9
7
10 from i18n import _
8 from i18n import _
11 import os, sys, osutil, errno, stat, getpass, pwd, grp
9 import os, sys, osutil, errno, stat, getpass, pwd, grp
12
10
13 posixfile = file
11 posixfile = file
14 nulldev = '/dev/null'
12 nulldev = '/dev/null'
15 normpath = os.path.normpath
13 normpath = os.path.normpath
16 samestat = os.path.samestat
14 samestat = os.path.samestat
17
15
18 umask = os.umask(0)
16 umask = os.umask(0)
19 os.umask(umask)
17 os.umask(umask)
20
18
21 def openhardlinks():
19 def openhardlinks():
22 '''return true if it is safe to hold open file handles to hardlinks'''
20 '''return true if it is safe to hold open file handles to hardlinks'''
23 return True
21 return True
24
22
25 def rcfiles(path):
23 def rcfiles(path):
26 rcs = [os.path.join(path, 'hgrc')]
24 rcs = [os.path.join(path, 'hgrc')]
27 rcdir = os.path.join(path, 'hgrc.d')
25 rcdir = os.path.join(path, 'hgrc.d')
28 try:
26 try:
29 rcs.extend([os.path.join(rcdir, f)
27 rcs.extend([os.path.join(rcdir, f)
30 for f, kind in osutil.listdir(rcdir)
28 for f, kind in osutil.listdir(rcdir)
31 if f.endswith(".rc")])
29 if f.endswith(".rc")])
32 except OSError:
30 except OSError:
33 pass
31 pass
34 return rcs
32 return rcs
35
33
36 def system_rcpath():
34 def system_rcpath():
37 path = []
35 path = []
38 # old mod_python does not set sys.argv
36 # old mod_python does not set sys.argv
39 if len(getattr(sys, 'argv', [])) > 0:
37 if len(getattr(sys, 'argv', [])) > 0:
40 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
38 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
41 '/../etc/mercurial'))
39 '/../etc/mercurial'))
42 path.extend(rcfiles('/etc/mercurial'))
40 path.extend(rcfiles('/etc/mercurial'))
43 return path
41 return path
44
42
45 def user_rcpath():
43 def user_rcpath():
46 return [os.path.expanduser('~/.hgrc')]
44 return [os.path.expanduser('~/.hgrc')]
47
45
48 def parse_patch_output(output_line):
46 def parse_patch_output(output_line):
49 """parses the output produced by patch and returns the file name"""
47 """parses the output produced by patch and returns the file name"""
50 pf = output_line[14:]
48 pf = output_line[14:]
51 if os.sys.platform == 'OpenVMS':
49 if os.sys.platform == 'OpenVMS':
52 if pf[0] == '`':
50 if pf[0] == '`':
53 pf = pf[1:-1] # Remove the quotes
51 pf = pf[1:-1] # Remove the quotes
54 else:
52 else:
55 if pf.startswith("'") and pf.endswith("'") and " " in pf:
53 if pf.startswith("'") and pf.endswith("'") and " " in pf:
56 pf = pf[1:-1] # Remove the quotes
54 pf = pf[1:-1] # Remove the quotes
57 return pf
55 return pf
58
56
59 def sshargs(sshcmd, host, user, port):
57 def sshargs(sshcmd, host, user, port):
60 '''Build argument list for ssh'''
58 '''Build argument list for ssh'''
61 args = user and ("%s@%s" % (user, host)) or host
59 args = user and ("%s@%s" % (user, host)) or host
62 return port and ("%s -p %s" % (args, port)) or args
60 return port and ("%s -p %s" % (args, port)) or args
63
61
64 def is_exec(f):
62 def is_exec(f):
65 """check whether a file is executable"""
63 """check whether a file is executable"""
66 return (os.lstat(f).st_mode & 0100 != 0)
64 return (os.lstat(f).st_mode & 0100 != 0)
67
65
68 def set_flags(f, l, x):
66 def set_flags(f, l, x):
69 s = os.lstat(f).st_mode
67 s = os.lstat(f).st_mode
70 if l:
68 if l:
71 if not stat.S_ISLNK(s):
69 if not stat.S_ISLNK(s):
72 # switch file to link
70 # switch file to link
73 data = file(f).read()
71 data = file(f).read()
74 os.unlink(f)
72 os.unlink(f)
75 try:
73 try:
76 os.symlink(data, f)
74 os.symlink(data, f)
77 except:
75 except:
78 # failed to make a link, rewrite file
76 # failed to make a link, rewrite file
79 file(f, "w").write(data)
77 file(f, "w").write(data)
80 # no chmod needed at this point
78 # no chmod needed at this point
81 return
79 return
82 if stat.S_ISLNK(s):
80 if stat.S_ISLNK(s):
83 # switch link to file
81 # switch link to file
84 data = os.readlink(f)
82 data = os.readlink(f)
85 os.unlink(f)
83 os.unlink(f)
86 file(f, "w").write(data)
84 file(f, "w").write(data)
87 s = 0666 & ~umask # avoid restatting for chmod
85 s = 0666 & ~umask # avoid restatting for chmod
88
86
89 sx = s & 0100
87 sx = s & 0100
90 if x and not sx:
88 if x and not sx:
91 # Turn on +x for every +r bit when making a file executable
89 # Turn on +x for every +r bit when making a file executable
92 # and obey umask.
90 # and obey umask.
93 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
91 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
94 elif not x and sx:
92 elif not x and sx:
95 # Turn off all +x bits
93 # Turn off all +x bits
96 os.chmod(f, s & 0666)
94 os.chmod(f, s & 0666)
97
95
98 def set_binary(fd):
96 def set_binary(fd):
99 pass
97 pass
100
98
101 def pconvert(path):
99 def pconvert(path):
102 return path
100 return path
103
101
104 def localpath(path):
102 def localpath(path):
105 return path
103 return path
106
104
107 def shellquote(s):
105 def shellquote(s):
108 if os.sys.platform == 'OpenVMS':
106 if os.sys.platform == 'OpenVMS':
109 return '"%s"' % s
107 return '"%s"' % s
110 else:
108 else:
111 return "'%s'" % s.replace("'", "'\\''")
109 return "'%s'" % s.replace("'", "'\\''")
112
110
113 def quotecommand(cmd):
111 def quotecommand(cmd):
114 return cmd
112 return cmd
115
113
116 def popen(command, mode='r'):
114 def popen(command, mode='r'):
117 return os.popen(command, mode)
115 return os.popen(command, mode)
118
116
119 def testpid(pid):
117 def testpid(pid):
120 '''return False if pid dead, True if running or not sure'''
118 '''return False if pid dead, True if running or not sure'''
121 if os.sys.platform == 'OpenVMS':
119 if os.sys.platform == 'OpenVMS':
122 return True
120 return True
123 try:
121 try:
124 os.kill(pid, 0)
122 os.kill(pid, 0)
125 return True
123 return True
126 except OSError, inst:
124 except OSError, inst:
127 return inst.errno != errno.ESRCH
125 return inst.errno != errno.ESRCH
128
126
129 def explain_exit(code):
127 def explain_exit(code):
130 """return a 2-tuple (desc, code) describing a process's status"""
128 """return a 2-tuple (desc, code) describing a process's status"""
131 if os.WIFEXITED(code):
129 if os.WIFEXITED(code):
132 val = os.WEXITSTATUS(code)
130 val = os.WEXITSTATUS(code)
133 return _("exited with status %d") % val, val
131 return _("exited with status %d") % val, val
134 elif os.WIFSIGNALED(code):
132 elif os.WIFSIGNALED(code):
135 val = os.WTERMSIG(code)
133 val = os.WTERMSIG(code)
136 return _("killed by signal %d") % val, val
134 return _("killed by signal %d") % val, val
137 elif os.WIFSTOPPED(code):
135 elif os.WIFSTOPPED(code):
138 val = os.WSTOPSIG(code)
136 val = os.WSTOPSIG(code)
139 return _("stopped by signal %d") % val, val
137 return _("stopped by signal %d") % val, val
140 raise ValueError(_("invalid exit code"))
138 raise ValueError(_("invalid exit code"))
141
139
142 def isowner(fp, st=None):
140 def isowner(fp, st=None):
143 """Return True if the file object f belongs to the current user.
141 """Return True if the file object f belongs to the current user.
144
142
145 The return value of a util.fstat(f) may be passed as the st argument.
143 The return value of a util.fstat(f) may be passed as the st argument.
146 """
144 """
147 if st is None:
145 if st is None:
148 st = fstat(fp)
146 st = fstat(fp)
149 return st.st_uid == os.getuid()
147 return st.st_uid == os.getuid()
150
148
151 def find_exe(command):
149 def find_exe(command):
152 '''Find executable for command searching like which does.
150 '''Find executable for command searching like which does.
153 If command is a basename then PATH is searched for command.
151 If command is a basename then PATH is searched for command.
154 PATH isn't searched if command is an absolute or relative path.
152 PATH isn't searched if command is an absolute or relative path.
155 If command isn't found None is returned.'''
153 If command isn't found None is returned.'''
156 if sys.platform == 'OpenVMS':
154 if sys.platform == 'OpenVMS':
157 return command
155 return command
158
156
159 def findexisting(executable):
157 def findexisting(executable):
160 'Will return executable if existing file'
158 'Will return executable if existing file'
161 if os.path.exists(executable):
159 if os.path.exists(executable):
162 return executable
160 return executable
163 return None
161 return None
164
162
165 if os.sep in command:
163 if os.sep in command:
166 return findexisting(command)
164 return findexisting(command)
167
165
168 for path in os.environ.get('PATH', '').split(os.pathsep):
166 for path in os.environ.get('PATH', '').split(os.pathsep):
169 executable = findexisting(os.path.join(path, command))
167 executable = findexisting(os.path.join(path, command))
170 if executable is not None:
168 if executable is not None:
171 return executable
169 return executable
172 return None
170 return None
173
171
174 def set_signal_handler():
172 def set_signal_handler():
175 pass
173 pass
176
174
177 def statfiles(files):
175 def statfiles(files):
178 'Stat each file in files and yield stat or None if file does not exist.'
176 'Stat each file in files and yield stat or None if file does not exist.'
179 lstat = os.lstat
177 lstat = os.lstat
180 for nf in files:
178 for nf in files:
181 try:
179 try:
182 st = lstat(nf)
180 st = lstat(nf)
183 except OSError, err:
181 except OSError, err:
184 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
182 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
185 raise
183 raise
186 st = None
184 st = None
187 yield st
185 yield st
188
186
189 def getuser():
187 def getuser():
190 '''return name of current user'''
188 '''return name of current user'''
191 return getpass.getuser()
189 return getpass.getuser()
192
190
193 def expand_glob(pats):
191 def expand_glob(pats):
194 '''On Windows, expand the implicit globs in a list of patterns'''
192 '''On Windows, expand the implicit globs in a list of patterns'''
195 return list(pats)
193 return list(pats)
196
194
197 def username(uid=None):
195 def username(uid=None):
198 """Return the name of the user with the given uid.
196 """Return the name of the user with the given uid.
199
197
200 If uid is None, return the name of the current user."""
198 If uid is None, return the name of the current user."""
201
199
202 if uid is None:
200 if uid is None:
203 uid = os.getuid()
201 uid = os.getuid()
204 try:
202 try:
205 return pwd.getpwuid(uid)[0]
203 return pwd.getpwuid(uid)[0]
206 except KeyError:
204 except KeyError:
207 return str(uid)
205 return str(uid)
208
206
209 def groupname(gid=None):
207 def groupname(gid=None):
210 """Return the name of the group with the given gid.
208 """Return the name of the group with the given gid.
211
209
212 If gid is None, return the name of the current group."""
210 If gid is None, return the name of the current group."""
213
211
214 if gid is None:
212 if gid is None:
215 gid = os.getgid()
213 gid = os.getgid()
216 try:
214 try:
217 return grp.getgrgid(gid)[0]
215 return grp.getgrgid(gid)[0]
218 except KeyError:
216 except KeyError:
219 return str(gid)
217 return str(gid)
@@ -1,1369 +1,1367 b''
1 """
1 # revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
2 #
3
3 # This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
4 # and O(changes) merge between branches
5 and O(changes) merge between branches
5 #
6
6 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
7 #
8
8 # This software may be used and distributed according to the terms of the
9 This software may be used and distributed according to the terms of the
9 # GNU General Public License version 2, incorporated herein by reference.
10 GNU General Public License version 2, incorporated herein by reference.
11 """
12
10
13 # import stuff from node for others to import from revlog
11 # import stuff from node for others to import from revlog
14 from node import bin, hex, nullid, nullrev, short #@UnusedImport
12 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from i18n import _
13 from i18n import _
16 import changegroup, errno, ancestor, mdiff, parsers
14 import changegroup, errno, ancestor, mdiff, parsers
17 import struct, util, zlib, error
15 import struct, util, zlib, error
18
16
19 _pack = struct.pack
17 _pack = struct.pack
20 _unpack = struct.unpack
18 _unpack = struct.unpack
21 _compress = zlib.compress
19 _compress = zlib.compress
22 _decompress = zlib.decompress
20 _decompress = zlib.decompress
23 _sha = util.sha1
21 _sha = util.sha1
24
22
25 # revlog flags
23 # revlog flags
26 REVLOGV0 = 0
24 REVLOGV0 = 0
27 REVLOGNG = 1
25 REVLOGNG = 1
28 REVLOGNGINLINEDATA = (1 << 16)
26 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
27 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
30 REVLOG_DEFAULT_FORMAT = REVLOGNG
28 REVLOG_DEFAULT_FORMAT = REVLOGNG
31 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
29 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
32
30
33 RevlogError = error.RevlogError
31 RevlogError = error.RevlogError
34 LookupError = error.LookupError
32 LookupError = error.LookupError
35
33
36 def getoffset(q):
34 def getoffset(q):
37 return int(q >> 16)
35 return int(q >> 16)
38
36
39 def gettype(q):
37 def gettype(q):
40 return int(q & 0xFFFF)
38 return int(q & 0xFFFF)
41
39
42 def offset_type(offset, type):
40 def offset_type(offset, type):
43 return long(long(offset) << 16 | type)
41 return long(long(offset) << 16 | type)
44
42
45 nullhash = _sha(nullid)
43 nullhash = _sha(nullid)
46
44
47 def hash(text, p1, p2):
45 def hash(text, p1, p2):
48 """generate a hash from the given text and its parent hashes
46 """generate a hash from the given text and its parent hashes
49
47
50 This hash combines both the current file contents and its history
48 This hash combines both the current file contents and its history
51 in a manner that makes it easy to distinguish nodes with the same
49 in a manner that makes it easy to distinguish nodes with the same
52 content in the revision graph.
50 content in the revision graph.
53 """
51 """
54 # As of now, if one of the parent node is null, p2 is null
52 # As of now, if one of the parent node is null, p2 is null
55 if p2 == nullid:
53 if p2 == nullid:
56 # deep copy of a hash is faster than creating one
54 # deep copy of a hash is faster than creating one
57 s = nullhash.copy()
55 s = nullhash.copy()
58 s.update(p1)
56 s.update(p1)
59 else:
57 else:
60 # none of the parent nodes are nullid
58 # none of the parent nodes are nullid
61 l = [p1, p2]
59 l = [p1, p2]
62 l.sort()
60 l.sort()
63 s = _sha(l[0])
61 s = _sha(l[0])
64 s.update(l[1])
62 s.update(l[1])
65 s.update(text)
63 s.update(text)
66 return s.digest()
64 return s.digest()
67
65
68 def compress(text):
66 def compress(text):
69 """ generate a possibly-compressed representation of text """
67 """ generate a possibly-compressed representation of text """
70 if not text:
68 if not text:
71 return ("", text)
69 return ("", text)
72 l = len(text)
70 l = len(text)
73 bin = None
71 bin = None
74 if l < 44:
72 if l < 44:
75 pass
73 pass
76 elif l > 1000000:
74 elif l > 1000000:
77 # zlib makes an internal copy, thus doubling memory usage for
75 # zlib makes an internal copy, thus doubling memory usage for
78 # large files, so lets do this in pieces
76 # large files, so lets do this in pieces
79 z = zlib.compressobj()
77 z = zlib.compressobj()
80 p = []
78 p = []
81 pos = 0
79 pos = 0
82 while pos < l:
80 while pos < l:
83 pos2 = pos + 2**20
81 pos2 = pos + 2**20
84 p.append(z.compress(text[pos:pos2]))
82 p.append(z.compress(text[pos:pos2]))
85 pos = pos2
83 pos = pos2
86 p.append(z.flush())
84 p.append(z.flush())
87 if sum(map(len, p)) < l:
85 if sum(map(len, p)) < l:
88 bin = "".join(p)
86 bin = "".join(p)
89 else:
87 else:
90 bin = _compress(text)
88 bin = _compress(text)
91 if bin is None or len(bin) > l:
89 if bin is None or len(bin) > l:
92 if text[0] == '\0':
90 if text[0] == '\0':
93 return ("", text)
91 return ("", text)
94 return ('u', text)
92 return ('u', text)
95 return ("", bin)
93 return ("", bin)
96
94
97 def decompress(bin):
95 def decompress(bin):
98 """ decompress the given input """
96 """ decompress the given input """
99 if not bin:
97 if not bin:
100 return bin
98 return bin
101 t = bin[0]
99 t = bin[0]
102 if t == '\0':
100 if t == '\0':
103 return bin
101 return bin
104 if t == 'x':
102 if t == 'x':
105 return _decompress(bin)
103 return _decompress(bin)
106 if t == 'u':
104 if t == 'u':
107 return bin[1:]
105 return bin[1:]
108 raise RevlogError(_("unknown compression type %r") % t)
106 raise RevlogError(_("unknown compression type %r") % t)
109
107
110 class lazyparser(object):
108 class lazyparser(object):
111 """
109 """
112 this class avoids the need to parse the entirety of large indices
110 this class avoids the need to parse the entirety of large indices
113 """
111 """
114
112
115 # lazyparser is not safe to use on windows if win32 extensions not
113 # lazyparser is not safe to use on windows if win32 extensions not
116 # available. it keeps file handle open, which make it not possible
114 # available. it keeps file handle open, which make it not possible
117 # to break hardlinks on local cloned repos.
115 # to break hardlinks on local cloned repos.
118
116
119 def __init__(self, dataf, size):
117 def __init__(self, dataf, size):
120 self.dataf = dataf
118 self.dataf = dataf
121 self.s = struct.calcsize(indexformatng)
119 self.s = struct.calcsize(indexformatng)
122 self.datasize = size
120 self.datasize = size
123 self.l = size/self.s
121 self.l = size/self.s
124 self.index = [None] * self.l
122 self.index = [None] * self.l
125 self.map = {nullid: nullrev}
123 self.map = {nullid: nullrev}
126 self.allmap = 0
124 self.allmap = 0
127 self.all = 0
125 self.all = 0
128 self.mapfind_count = 0
126 self.mapfind_count = 0
129
127
130 def loadmap(self):
128 def loadmap(self):
131 """
129 """
132 during a commit, we need to make sure the rev being added is
130 during a commit, we need to make sure the rev being added is
133 not a duplicate. This requires loading the entire index,
131 not a duplicate. This requires loading the entire index,
134 which is fairly slow. loadmap can load up just the node map,
132 which is fairly slow. loadmap can load up just the node map,
135 which takes much less time.
133 which takes much less time.
136 """
134 """
137 if self.allmap:
135 if self.allmap:
138 return
136 return
139 end = self.datasize
137 end = self.datasize
140 self.allmap = 1
138 self.allmap = 1
141 cur = 0
139 cur = 0
142 count = 0
140 count = 0
143 blocksize = self.s * 256
141 blocksize = self.s * 256
144 self.dataf.seek(0)
142 self.dataf.seek(0)
145 while cur < end:
143 while cur < end:
146 data = self.dataf.read(blocksize)
144 data = self.dataf.read(blocksize)
147 off = 0
145 off = 0
148 for x in xrange(256):
146 for x in xrange(256):
149 n = data[off + ngshaoffset:off + ngshaoffset + 20]
147 n = data[off + ngshaoffset:off + ngshaoffset + 20]
150 self.map[n] = count
148 self.map[n] = count
151 count += 1
149 count += 1
152 if count >= self.l:
150 if count >= self.l:
153 break
151 break
154 off += self.s
152 off += self.s
155 cur += blocksize
153 cur += blocksize
156
154
157 def loadblock(self, blockstart, blocksize, data=None):
155 def loadblock(self, blockstart, blocksize, data=None):
158 if self.all:
156 if self.all:
159 return
157 return
160 if data is None:
158 if data is None:
161 self.dataf.seek(blockstart)
159 self.dataf.seek(blockstart)
162 if blockstart + blocksize > self.datasize:
160 if blockstart + blocksize > self.datasize:
163 # the revlog may have grown since we've started running,
161 # the revlog may have grown since we've started running,
164 # but we don't have space in self.index for more entries.
162 # but we don't have space in self.index for more entries.
165 # limit blocksize so that we don't get too much data.
163 # limit blocksize so that we don't get too much data.
166 blocksize = max(self.datasize - blockstart, 0)
164 blocksize = max(self.datasize - blockstart, 0)
167 data = self.dataf.read(blocksize)
165 data = self.dataf.read(blocksize)
168 lend = len(data) / self.s
166 lend = len(data) / self.s
169 i = blockstart / self.s
167 i = blockstart / self.s
170 off = 0
168 off = 0
171 # lazyindex supports __delitem__
169 # lazyindex supports __delitem__
172 if lend > len(self.index) - i:
170 if lend > len(self.index) - i:
173 lend = len(self.index) - i
171 lend = len(self.index) - i
174 for x in xrange(lend):
172 for x in xrange(lend):
175 if self.index[i + x] == None:
173 if self.index[i + x] == None:
176 b = data[off : off + self.s]
174 b = data[off : off + self.s]
177 self.index[i + x] = b
175 self.index[i + x] = b
178 n = b[ngshaoffset:ngshaoffset + 20]
176 n = b[ngshaoffset:ngshaoffset + 20]
179 self.map[n] = i + x
177 self.map[n] = i + x
180 off += self.s
178 off += self.s
181
179
182 def findnode(self, node):
180 def findnode(self, node):
183 """search backwards through the index file for a specific node"""
181 """search backwards through the index file for a specific node"""
184 if self.allmap:
182 if self.allmap:
185 return None
183 return None
186
184
187 # hg log will cause many many searches for the manifest
185 # hg log will cause many many searches for the manifest
188 # nodes. After we get called a few times, just load the whole
186 # nodes. After we get called a few times, just load the whole
189 # thing.
187 # thing.
190 if self.mapfind_count > 8:
188 if self.mapfind_count > 8:
191 self.loadmap()
189 self.loadmap()
192 if node in self.map:
190 if node in self.map:
193 return node
191 return node
194 return None
192 return None
195 self.mapfind_count += 1
193 self.mapfind_count += 1
196 last = self.l - 1
194 last = self.l - 1
197 while self.index[last] != None:
195 while self.index[last] != None:
198 if last == 0:
196 if last == 0:
199 self.all = 1
197 self.all = 1
200 self.allmap = 1
198 self.allmap = 1
201 return None
199 return None
202 last -= 1
200 last -= 1
203 end = (last + 1) * self.s
201 end = (last + 1) * self.s
204 blocksize = self.s * 256
202 blocksize = self.s * 256
205 while end >= 0:
203 while end >= 0:
206 start = max(end - blocksize, 0)
204 start = max(end - blocksize, 0)
207 self.dataf.seek(start)
205 self.dataf.seek(start)
208 data = self.dataf.read(end - start)
206 data = self.dataf.read(end - start)
209 findend = end - start
207 findend = end - start
210 while True:
208 while True:
211 # we're searching backwards, so we have to make sure
209 # we're searching backwards, so we have to make sure
212 # we don't find a changeset where this node is a parent
210 # we don't find a changeset where this node is a parent
213 off = data.find(node, 0, findend)
211 off = data.find(node, 0, findend)
214 findend = off
212 findend = off
215 if off >= 0:
213 if off >= 0:
216 i = off / self.s
214 i = off / self.s
217 off = i * self.s
215 off = i * self.s
218 n = data[off + ngshaoffset:off + ngshaoffset + 20]
216 n = data[off + ngshaoffset:off + ngshaoffset + 20]
219 if n == node:
217 if n == node:
220 self.map[n] = i + start / self.s
218 self.map[n] = i + start / self.s
221 return node
219 return node
222 else:
220 else:
223 break
221 break
224 end -= blocksize
222 end -= blocksize
225 return None
223 return None
226
224
227 def loadindex(self, i=None, end=None):
225 def loadindex(self, i=None, end=None):
228 if self.all:
226 if self.all:
229 return
227 return
230 all = False
228 all = False
231 if i == None:
229 if i == None:
232 blockstart = 0
230 blockstart = 0
233 blocksize = (65536 / self.s) * self.s
231 blocksize = (65536 / self.s) * self.s
234 end = self.datasize
232 end = self.datasize
235 all = True
233 all = True
236 else:
234 else:
237 if end:
235 if end:
238 blockstart = i * self.s
236 blockstart = i * self.s
239 end = end * self.s
237 end = end * self.s
240 blocksize = end - blockstart
238 blocksize = end - blockstart
241 else:
239 else:
242 blockstart = (i & ~1023) * self.s
240 blockstart = (i & ~1023) * self.s
243 blocksize = self.s * 1024
241 blocksize = self.s * 1024
244 end = blockstart + blocksize
242 end = blockstart + blocksize
245 while blockstart < end:
243 while blockstart < end:
246 self.loadblock(blockstart, blocksize)
244 self.loadblock(blockstart, blocksize)
247 blockstart += blocksize
245 blockstart += blocksize
248 if all:
246 if all:
249 self.all = True
247 self.all = True
250
248
251 class lazyindex(object):
249 class lazyindex(object):
252 """a lazy version of the index array"""
250 """a lazy version of the index array"""
253 def __init__(self, parser):
251 def __init__(self, parser):
254 self.p = parser
252 self.p = parser
255 def __len__(self):
253 def __len__(self):
256 return len(self.p.index)
254 return len(self.p.index)
257 def load(self, pos):
255 def load(self, pos):
258 if pos < 0:
256 if pos < 0:
259 pos += len(self.p.index)
257 pos += len(self.p.index)
260 self.p.loadindex(pos)
258 self.p.loadindex(pos)
261 return self.p.index[pos]
259 return self.p.index[pos]
262 def __getitem__(self, pos):
260 def __getitem__(self, pos):
263 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
261 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
264 def __setitem__(self, pos, item):
262 def __setitem__(self, pos, item):
265 self.p.index[pos] = _pack(indexformatng, *item)
263 self.p.index[pos] = _pack(indexformatng, *item)
266 def __delitem__(self, pos):
264 def __delitem__(self, pos):
267 del self.p.index[pos]
265 del self.p.index[pos]
268 def insert(self, pos, e):
266 def insert(self, pos, e):
269 self.p.index.insert(pos, _pack(indexformatng, *e))
267 self.p.index.insert(pos, _pack(indexformatng, *e))
270 def append(self, e):
268 def append(self, e):
271 self.p.index.append(_pack(indexformatng, *e))
269 self.p.index.append(_pack(indexformatng, *e))
272
270
273 class lazymap(object):
271 class lazymap(object):
274 """a lazy version of the node map"""
272 """a lazy version of the node map"""
275 def __init__(self, parser):
273 def __init__(self, parser):
276 self.p = parser
274 self.p = parser
277 def load(self, key):
275 def load(self, key):
278 n = self.p.findnode(key)
276 n = self.p.findnode(key)
279 if n == None:
277 if n == None:
280 raise KeyError(key)
278 raise KeyError(key)
281 def __contains__(self, key):
279 def __contains__(self, key):
282 if key in self.p.map:
280 if key in self.p.map:
283 return True
281 return True
284 self.p.loadmap()
282 self.p.loadmap()
285 return key in self.p.map
283 return key in self.p.map
286 def __iter__(self):
284 def __iter__(self):
287 yield nullid
285 yield nullid
288 for i in xrange(self.p.l):
286 for i in xrange(self.p.l):
289 ret = self.p.index[i]
287 ret = self.p.index[i]
290 if not ret:
288 if not ret:
291 self.p.loadindex(i)
289 self.p.loadindex(i)
292 ret = self.p.index[i]
290 ret = self.p.index[i]
293 if isinstance(ret, str):
291 if isinstance(ret, str):
294 ret = _unpack(indexformatng, ret)
292 ret = _unpack(indexformatng, ret)
295 yield ret[7]
293 yield ret[7]
296 def __getitem__(self, key):
294 def __getitem__(self, key):
297 try:
295 try:
298 return self.p.map[key]
296 return self.p.map[key]
299 except KeyError:
297 except KeyError:
300 try:
298 try:
301 self.load(key)
299 self.load(key)
302 return self.p.map[key]
300 return self.p.map[key]
303 except KeyError:
301 except KeyError:
304 raise KeyError("node " + hex(key))
302 raise KeyError("node " + hex(key))
305 def __setitem__(self, key, val):
303 def __setitem__(self, key, val):
306 self.p.map[key] = val
304 self.p.map[key] = val
307 def __delitem__(self, key):
305 def __delitem__(self, key):
308 del self.p.map[key]
306 del self.p.map[key]
309
307
310 indexformatv0 = ">4l20s20s20s"
308 indexformatv0 = ">4l20s20s20s"
311 v0shaoffset = 56
309 v0shaoffset = 56
312
310
313 class revlogoldio(object):
311 class revlogoldio(object):
314 def __init__(self):
312 def __init__(self):
315 self.size = struct.calcsize(indexformatv0)
313 self.size = struct.calcsize(indexformatv0)
316
314
317 def parseindex(self, fp, inline):
315 def parseindex(self, fp, inline):
318 s = self.size
316 s = self.size
319 index = []
317 index = []
320 nodemap = {nullid: nullrev}
318 nodemap = {nullid: nullrev}
321 n = off = 0
319 n = off = 0
322 data = fp.read()
320 data = fp.read()
323 l = len(data)
321 l = len(data)
324 while off + s <= l:
322 while off + s <= l:
325 cur = data[off:off + s]
323 cur = data[off:off + s]
326 off += s
324 off += s
327 e = _unpack(indexformatv0, cur)
325 e = _unpack(indexformatv0, cur)
328 # transform to revlogv1 format
326 # transform to revlogv1 format
329 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
327 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
330 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
328 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
331 index.append(e2)
329 index.append(e2)
332 nodemap[e[6]] = n
330 nodemap[e[6]] = n
333 n += 1
331 n += 1
334
332
335 return index, nodemap, None
333 return index, nodemap, None
336
334
337 def packentry(self, entry, node, version, rev):
335 def packentry(self, entry, node, version, rev):
338 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
336 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
339 node(entry[5]), node(entry[6]), entry[7])
337 node(entry[5]), node(entry[6]), entry[7])
340 return _pack(indexformatv0, *e2)
338 return _pack(indexformatv0, *e2)
341
339
342 # index ng:
340 # index ng:
343 # 6 bytes offset
341 # 6 bytes offset
344 # 2 bytes flags
342 # 2 bytes flags
345 # 4 bytes compressed length
343 # 4 bytes compressed length
346 # 4 bytes uncompressed length
344 # 4 bytes uncompressed length
347 # 4 bytes: base rev
345 # 4 bytes: base rev
348 # 4 bytes link rev
346 # 4 bytes link rev
349 # 4 bytes parent 1 rev
347 # 4 bytes parent 1 rev
350 # 4 bytes parent 2 rev
348 # 4 bytes parent 2 rev
351 # 32 bytes: nodeid
349 # 32 bytes: nodeid
352 indexformatng = ">Qiiiiii20s12x"
350 indexformatng = ">Qiiiiii20s12x"
353 ngshaoffset = 32
351 ngshaoffset = 32
354 versionformat = ">I"
352 versionformat = ">I"
355
353
356 class revlogio(object):
354 class revlogio(object):
357 def __init__(self):
355 def __init__(self):
358 self.size = struct.calcsize(indexformatng)
356 self.size = struct.calcsize(indexformatng)
359
357
360 def parseindex(self, fp, inline):
358 def parseindex(self, fp, inline):
361 try:
359 try:
362 size = util.fstat(fp).st_size
360 size = util.fstat(fp).st_size
363 except AttributeError:
361 except AttributeError:
364 size = 0
362 size = 0
365
363
366 if util.openhardlinks() and not inline and size > 1000000:
364 if util.openhardlinks() and not inline and size > 1000000:
367 # big index, let's parse it on demand
365 # big index, let's parse it on demand
368 parser = lazyparser(fp, size)
366 parser = lazyparser(fp, size)
369 index = lazyindex(parser)
367 index = lazyindex(parser)
370 nodemap = lazymap(parser)
368 nodemap = lazymap(parser)
371 e = list(index[0])
369 e = list(index[0])
372 type = gettype(e[0])
370 type = gettype(e[0])
373 e[0] = offset_type(0, type)
371 e[0] = offset_type(0, type)
374 index[0] = e
372 index[0] = e
375 return index, nodemap, None
373 return index, nodemap, None
376
374
377 data = fp.read()
375 data = fp.read()
378 # call the C implementation to parse the index data
376 # call the C implementation to parse the index data
379 index, nodemap, cache = parsers.parse_index(data, inline)
377 index, nodemap, cache = parsers.parse_index(data, inline)
380 return index, nodemap, cache
378 return index, nodemap, cache
381
379
382 def packentry(self, entry, node, version, rev):
380 def packentry(self, entry, node, version, rev):
383 p = _pack(indexformatng, *entry)
381 p = _pack(indexformatng, *entry)
384 if rev == 0:
382 if rev == 0:
385 p = _pack(versionformat, version) + p[4:]
383 p = _pack(versionformat, version) + p[4:]
386 return p
384 return p
387
385
388 class revlog(object):
386 class revlog(object):
389 """
387 """
390 the underlying revision storage object
388 the underlying revision storage object
391
389
392 A revlog consists of two parts, an index and the revision data.
390 A revlog consists of two parts, an index and the revision data.
393
391
394 The index is a file with a fixed record size containing
392 The index is a file with a fixed record size containing
395 information on each revision, including its nodeid (hash), the
393 information on each revision, including its nodeid (hash), the
396 nodeids of its parents, the position and offset of its data within
394 nodeids of its parents, the position and offset of its data within
397 the data file, and the revision it's based on. Finally, each entry
395 the data file, and the revision it's based on. Finally, each entry
398 contains a linkrev entry that can serve as a pointer to external
396 contains a linkrev entry that can serve as a pointer to external
399 data.
397 data.
400
398
401 The revision data itself is a linear collection of data chunks.
399 The revision data itself is a linear collection of data chunks.
402 Each chunk represents a revision and is usually represented as a
400 Each chunk represents a revision and is usually represented as a
403 delta against the previous chunk. To bound lookup time, runs of
401 delta against the previous chunk. To bound lookup time, runs of
404 deltas are limited to about 2 times the length of the original
402 deltas are limited to about 2 times the length of the original
405 version data. This makes retrieval of a version proportional to
403 version data. This makes retrieval of a version proportional to
406 its size, or O(1) relative to the number of revisions.
404 its size, or O(1) relative to the number of revisions.
407
405
408 Both pieces of the revlog are written to in an append-only
406 Both pieces of the revlog are written to in an append-only
409 fashion, which means we never need to rewrite a file to insert or
407 fashion, which means we never need to rewrite a file to insert or
410 remove data, and can use some simple techniques to avoid the need
408 remove data, and can use some simple techniques to avoid the need
411 for locking while reading.
409 for locking while reading.
412 """
410 """
413 def __init__(self, opener, indexfile):
411 def __init__(self, opener, indexfile):
414 """
412 """
415 create a revlog object
413 create a revlog object
416
414
417 opener is a function that abstracts the file opening operation
415 opener is a function that abstracts the file opening operation
418 and can be used to implement COW semantics or the like.
416 and can be used to implement COW semantics or the like.
419 """
417 """
420 self.indexfile = indexfile
418 self.indexfile = indexfile
421 self.datafile = indexfile[:-2] + ".d"
419 self.datafile = indexfile[:-2] + ".d"
422 self.opener = opener
420 self.opener = opener
423 self._cache = None
421 self._cache = None
424 self._chunkcache = None
422 self._chunkcache = None
425 self.nodemap = {nullid: nullrev}
423 self.nodemap = {nullid: nullrev}
426 self.index = []
424 self.index = []
427
425
428 v = REVLOG_DEFAULT_VERSION
426 v = REVLOG_DEFAULT_VERSION
429 if hasattr(opener, "defversion"):
427 if hasattr(opener, "defversion"):
430 v = opener.defversion
428 v = opener.defversion
431 if v & REVLOGNG:
429 if v & REVLOGNG:
432 v |= REVLOGNGINLINEDATA
430 v |= REVLOGNGINLINEDATA
433
431
434 i = ""
432 i = ""
435 try:
433 try:
436 f = self.opener(self.indexfile)
434 f = self.opener(self.indexfile)
437 i = f.read(4)
435 i = f.read(4)
438 f.seek(0)
436 f.seek(0)
439 if len(i) > 0:
437 if len(i) > 0:
440 v = struct.unpack(versionformat, i)[0]
438 v = struct.unpack(versionformat, i)[0]
441 except IOError, inst:
439 except IOError, inst:
442 if inst.errno != errno.ENOENT:
440 if inst.errno != errno.ENOENT:
443 raise
441 raise
444
442
445 self.version = v
443 self.version = v
446 self._inline = v & REVLOGNGINLINEDATA
444 self._inline = v & REVLOGNGINLINEDATA
447 flags = v & ~0xFFFF
445 flags = v & ~0xFFFF
448 fmt = v & 0xFFFF
446 fmt = v & 0xFFFF
449 if fmt == REVLOGV0 and flags:
447 if fmt == REVLOGV0 and flags:
450 raise RevlogError(_("index %s unknown flags %#04x for format v0")
448 raise RevlogError(_("index %s unknown flags %#04x for format v0")
451 % (self.indexfile, flags >> 16))
449 % (self.indexfile, flags >> 16))
452 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
450 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
453 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
451 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
454 % (self.indexfile, flags >> 16))
452 % (self.indexfile, flags >> 16))
455 elif fmt > REVLOGNG:
453 elif fmt > REVLOGNG:
456 raise RevlogError(_("index %s unknown format %d")
454 raise RevlogError(_("index %s unknown format %d")
457 % (self.indexfile, fmt))
455 % (self.indexfile, fmt))
458
456
459 self._io = revlogio()
457 self._io = revlogio()
460 if self.version == REVLOGV0:
458 if self.version == REVLOGV0:
461 self._io = revlogoldio()
459 self._io = revlogoldio()
462 if i:
460 if i:
463 try:
461 try:
464 d = self._io.parseindex(f, self._inline)
462 d = self._io.parseindex(f, self._inline)
465 except (ValueError, IndexError), e:
463 except (ValueError, IndexError), e:
466 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
464 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
467 self.index, self.nodemap, self._chunkcache = d
465 self.index, self.nodemap, self._chunkcache = d
468
466
469 # add the magic null revision at -1 (if it hasn't been done already)
467 # add the magic null revision at -1 (if it hasn't been done already)
470 if (self.index == [] or isinstance(self.index, lazyindex) or
468 if (self.index == [] or isinstance(self.index, lazyindex) or
471 self.index[-1][7] != nullid) :
469 self.index[-1][7] != nullid) :
472 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
470 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
473
471
474 def _loadindex(self, start, end):
472 def _loadindex(self, start, end):
475 """load a block of indexes all at once from the lazy parser"""
473 """load a block of indexes all at once from the lazy parser"""
476 if isinstance(self.index, lazyindex):
474 if isinstance(self.index, lazyindex):
477 self.index.p.loadindex(start, end)
475 self.index.p.loadindex(start, end)
478
476
479 def _loadindexmap(self):
477 def _loadindexmap(self):
480 """loads both the map and the index from the lazy parser"""
478 """loads both the map and the index from the lazy parser"""
481 if isinstance(self.index, lazyindex):
479 if isinstance(self.index, lazyindex):
482 p = self.index.p
480 p = self.index.p
483 p.loadindex()
481 p.loadindex()
484 self.nodemap = p.map
482 self.nodemap = p.map
485
483
486 def _loadmap(self):
484 def _loadmap(self):
487 """loads the map from the lazy parser"""
485 """loads the map from the lazy parser"""
488 if isinstance(self.nodemap, lazymap):
486 if isinstance(self.nodemap, lazymap):
489 self.nodemap.p.loadmap()
487 self.nodemap.p.loadmap()
490 self.nodemap = self.nodemap.p.map
488 self.nodemap = self.nodemap.p.map
491
489
492 def tip(self):
490 def tip(self):
493 return self.node(len(self.index) - 2)
491 return self.node(len(self.index) - 2)
494 def __len__(self):
492 def __len__(self):
495 return len(self.index) - 1
493 return len(self.index) - 1
496 def __iter__(self):
494 def __iter__(self):
497 for i in xrange(len(self)):
495 for i in xrange(len(self)):
498 yield i
496 yield i
499 def rev(self, node):
497 def rev(self, node):
500 try:
498 try:
501 return self.nodemap[node]
499 return self.nodemap[node]
502 except KeyError:
500 except KeyError:
503 raise LookupError(node, self.indexfile, _('no node'))
501 raise LookupError(node, self.indexfile, _('no node'))
504 def node(self, rev):
502 def node(self, rev):
505 return self.index[rev][7]
503 return self.index[rev][7]
506 def linkrev(self, rev):
504 def linkrev(self, rev):
507 return self.index[rev][4]
505 return self.index[rev][4]
508 def parents(self, node):
506 def parents(self, node):
509 i = self.index
507 i = self.index
510 d = i[self.rev(node)]
508 d = i[self.rev(node)]
511 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
509 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
512 def parentrevs(self, rev):
510 def parentrevs(self, rev):
513 return self.index[rev][5:7]
511 return self.index[rev][5:7]
514 def start(self, rev):
512 def start(self, rev):
515 return int(self.index[rev][0] >> 16)
513 return int(self.index[rev][0] >> 16)
516 def end(self, rev):
514 def end(self, rev):
517 return self.start(rev) + self.length(rev)
515 return self.start(rev) + self.length(rev)
518 def length(self, rev):
516 def length(self, rev):
519 return self.index[rev][1]
517 return self.index[rev][1]
520 def base(self, rev):
518 def base(self, rev):
521 return self.index[rev][3]
519 return self.index[rev][3]
522
520
523 def size(self, rev):
521 def size(self, rev):
524 """return the length of the uncompressed text for a given revision"""
522 """return the length of the uncompressed text for a given revision"""
525 l = self.index[rev][2]
523 l = self.index[rev][2]
526 if l >= 0:
524 if l >= 0:
527 return l
525 return l
528
526
529 t = self.revision(self.node(rev))
527 t = self.revision(self.node(rev))
530 return len(t)
528 return len(t)
531
529
532 # alternate implementation, The advantage to this code is it
530 # alternate implementation, The advantage to this code is it
533 # will be faster for a single revision. But, the results are not
531 # will be faster for a single revision. But, the results are not
534 # cached, so finding the size of every revision will be slower.
532 # cached, so finding the size of every revision will be slower.
535 """
533 """
536 if self.cache and self.cache[1] == rev:
534 if self.cache and self.cache[1] == rev:
537 return len(self.cache[2])
535 return len(self.cache[2])
538
536
539 base = self.base(rev)
537 base = self.base(rev)
540 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
538 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
541 base = self.cache[1]
539 base = self.cache[1]
542 text = self.cache[2]
540 text = self.cache[2]
543 else:
541 else:
544 text = self.revision(self.node(base))
542 text = self.revision(self.node(base))
545
543
546 l = len(text)
544 l = len(text)
547 for x in xrange(base + 1, rev + 1):
545 for x in xrange(base + 1, rev + 1):
548 l = mdiff.patchedsize(l, self.chunk(x))
546 l = mdiff.patchedsize(l, self.chunk(x))
549 return l
547 return l
550 """
548 """
551
549
552 def reachable(self, node, stop=None):
550 def reachable(self, node, stop=None):
553 """return a hash of all nodes ancestral to a given node, including
551 """return a hash of all nodes ancestral to a given node, including
554 the node itself, stopping when stop is matched"""
552 the node itself, stopping when stop is matched"""
555 reachable = {}
553 reachable = {}
556 visit = [node]
554 visit = [node]
557 reachable[node] = 1
555 reachable[node] = 1
558 if stop:
556 if stop:
559 stopn = self.rev(stop)
557 stopn = self.rev(stop)
560 else:
558 else:
561 stopn = 0
559 stopn = 0
562 while visit:
560 while visit:
563 n = visit.pop(0)
561 n = visit.pop(0)
564 if n == stop:
562 if n == stop:
565 continue
563 continue
566 if n == nullid:
564 if n == nullid:
567 continue
565 continue
568 for p in self.parents(n):
566 for p in self.parents(n):
569 if self.rev(p) < stopn:
567 if self.rev(p) < stopn:
570 continue
568 continue
571 if p not in reachable:
569 if p not in reachable:
572 reachable[p] = 1
570 reachable[p] = 1
573 visit.append(p)
571 visit.append(p)
574 return reachable
572 return reachable
575
573
576 def ancestors(self, *revs):
574 def ancestors(self, *revs):
577 'Generate the ancestors of revs using a breadth-first visit'
575 'Generate the ancestors of revs using a breadth-first visit'
578 visit = list(revs)
576 visit = list(revs)
579 seen = set([nullrev])
577 seen = set([nullrev])
580 while visit:
578 while visit:
581 for parent in self.parentrevs(visit.pop(0)):
579 for parent in self.parentrevs(visit.pop(0)):
582 if parent not in seen:
580 if parent not in seen:
583 visit.append(parent)
581 visit.append(parent)
584 seen.add(parent)
582 seen.add(parent)
585 yield parent
583 yield parent
586
584
587 def descendants(self, *revs):
585 def descendants(self, *revs):
588 'Generate the descendants of revs in topological order'
586 'Generate the descendants of revs in topological order'
589 seen = set(revs)
587 seen = set(revs)
590 for i in xrange(min(revs) + 1, len(self)):
588 for i in xrange(min(revs) + 1, len(self)):
591 for x in self.parentrevs(i):
589 for x in self.parentrevs(i):
592 if x != nullrev and x in seen:
590 if x != nullrev and x in seen:
593 seen.add(i)
591 seen.add(i)
594 yield i
592 yield i
595 break
593 break
596
594
597 def findmissing(self, common=None, heads=None):
595 def findmissing(self, common=None, heads=None):
598 '''
596 '''
599 returns the topologically sorted list of nodes from the set:
597 returns the topologically sorted list of nodes from the set:
600 missing = (ancestors(heads) \ ancestors(common))
598 missing = (ancestors(heads) \ ancestors(common))
601
599
602 where ancestors() is the set of ancestors from heads, heads included
600 where ancestors() is the set of ancestors from heads, heads included
603
601
604 if heads is None, the heads of the revlog are used
602 if heads is None, the heads of the revlog are used
605 if common is None, nullid is assumed to be a common node
603 if common is None, nullid is assumed to be a common node
606 '''
604 '''
607 if common is None:
605 if common is None:
608 common = [nullid]
606 common = [nullid]
609 if heads is None:
607 if heads is None:
610 heads = self.heads()
608 heads = self.heads()
611
609
612 common = [self.rev(n) for n in common]
610 common = [self.rev(n) for n in common]
613 heads = [self.rev(n) for n in heads]
611 heads = [self.rev(n) for n in heads]
614
612
615 # we want the ancestors, but inclusive
613 # we want the ancestors, but inclusive
616 has = set(self.ancestors(*common))
614 has = set(self.ancestors(*common))
617 has.add(nullrev)
615 has.add(nullrev)
618 has.update(common)
616 has.update(common)
619
617
620 # take all ancestors from heads that aren't in has
618 # take all ancestors from heads that aren't in has
621 missing = {}
619 missing = {}
622 visit = [r for r in heads if r not in has]
620 visit = [r for r in heads if r not in has]
623 while visit:
621 while visit:
624 r = visit.pop(0)
622 r = visit.pop(0)
625 if r in missing:
623 if r in missing:
626 continue
624 continue
627 else:
625 else:
628 missing[r] = None
626 missing[r] = None
629 for p in self.parentrevs(r):
627 for p in self.parentrevs(r):
630 if p not in has:
628 if p not in has:
631 visit.append(p)
629 visit.append(p)
632 missing = missing.keys()
630 missing = missing.keys()
633 missing.sort()
631 missing.sort()
634 return [self.node(r) for r in missing]
632 return [self.node(r) for r in missing]
635
633
636 def nodesbetween(self, roots=None, heads=None):
634 def nodesbetween(self, roots=None, heads=None):
637 """Return a tuple containing three elements. Elements 1 and 2 contain
635 """Return a tuple containing three elements. Elements 1 and 2 contain
638 a final list bases and heads after all the unreachable ones have been
636 a final list bases and heads after all the unreachable ones have been
639 pruned. Element 0 contains a topologically sorted list of all
637 pruned. Element 0 contains a topologically sorted list of all
640
638
641 nodes that satisfy these constraints:
639 nodes that satisfy these constraints:
642 1. All nodes must be descended from a node in roots (the nodes on
640 1. All nodes must be descended from a node in roots (the nodes on
643 roots are considered descended from themselves).
641 roots are considered descended from themselves).
644 2. All nodes must also be ancestors of a node in heads (the nodes in
642 2. All nodes must also be ancestors of a node in heads (the nodes in
645 heads are considered to be their own ancestors).
643 heads are considered to be their own ancestors).
646
644
647 If roots is unspecified, nullid is assumed as the only root.
645 If roots is unspecified, nullid is assumed as the only root.
648 If heads is unspecified, it is taken to be the output of the
646 If heads is unspecified, it is taken to be the output of the
649 heads method (i.e. a list of all nodes in the repository that
647 heads method (i.e. a list of all nodes in the repository that
650 have no children)."""
648 have no children)."""
651 nonodes = ([], [], [])
649 nonodes = ([], [], [])
652 if roots is not None:
650 if roots is not None:
653 roots = list(roots)
651 roots = list(roots)
654 if not roots:
652 if not roots:
655 return nonodes
653 return nonodes
656 lowestrev = min([self.rev(n) for n in roots])
654 lowestrev = min([self.rev(n) for n in roots])
657 else:
655 else:
658 roots = [nullid] # Everybody's a descendent of nullid
656 roots = [nullid] # Everybody's a descendent of nullid
659 lowestrev = nullrev
657 lowestrev = nullrev
660 if (lowestrev == nullrev) and (heads is None):
658 if (lowestrev == nullrev) and (heads is None):
661 # We want _all_ the nodes!
659 # We want _all_ the nodes!
662 return ([self.node(r) for r in self], [nullid], list(self.heads()))
660 return ([self.node(r) for r in self], [nullid], list(self.heads()))
663 if heads is None:
661 if heads is None:
664 # All nodes are ancestors, so the latest ancestor is the last
662 # All nodes are ancestors, so the latest ancestor is the last
665 # node.
663 # node.
666 highestrev = len(self) - 1
664 highestrev = len(self) - 1
667 # Set ancestors to None to signal that every node is an ancestor.
665 # Set ancestors to None to signal that every node is an ancestor.
668 ancestors = None
666 ancestors = None
669 # Set heads to an empty dictionary for later discovery of heads
667 # Set heads to an empty dictionary for later discovery of heads
670 heads = {}
668 heads = {}
671 else:
669 else:
672 heads = list(heads)
670 heads = list(heads)
673 if not heads:
671 if not heads:
674 return nonodes
672 return nonodes
675 ancestors = {}
673 ancestors = {}
676 # Turn heads into a dictionary so we can remove 'fake' heads.
674 # Turn heads into a dictionary so we can remove 'fake' heads.
677 # Also, later we will be using it to filter out the heads we can't
675 # Also, later we will be using it to filter out the heads we can't
678 # find from roots.
676 # find from roots.
679 heads = dict.fromkeys(heads, 0)
677 heads = dict.fromkeys(heads, 0)
680 # Start at the top and keep marking parents until we're done.
678 # Start at the top and keep marking parents until we're done.
681 nodestotag = set(heads)
679 nodestotag = set(heads)
682 # Remember where the top was so we can use it as a limit later.
680 # Remember where the top was so we can use it as a limit later.
683 highestrev = max([self.rev(n) for n in nodestotag])
681 highestrev = max([self.rev(n) for n in nodestotag])
684 while nodestotag:
682 while nodestotag:
685 # grab a node to tag
683 # grab a node to tag
686 n = nodestotag.pop()
684 n = nodestotag.pop()
687 # Never tag nullid
685 # Never tag nullid
688 if n == nullid:
686 if n == nullid:
689 continue
687 continue
690 # A node's revision number represents its place in a
688 # A node's revision number represents its place in a
691 # topologically sorted list of nodes.
689 # topologically sorted list of nodes.
692 r = self.rev(n)
690 r = self.rev(n)
693 if r >= lowestrev:
691 if r >= lowestrev:
694 if n not in ancestors:
692 if n not in ancestors:
695 # If we are possibly a descendent of one of the roots
693 # If we are possibly a descendent of one of the roots
696 # and we haven't already been marked as an ancestor
694 # and we haven't already been marked as an ancestor
697 ancestors[n] = 1 # Mark as ancestor
695 ancestors[n] = 1 # Mark as ancestor
698 # Add non-nullid parents to list of nodes to tag.
696 # Add non-nullid parents to list of nodes to tag.
699 nodestotag.update([p for p in self.parents(n) if
697 nodestotag.update([p for p in self.parents(n) if
700 p != nullid])
698 p != nullid])
701 elif n in heads: # We've seen it before, is it a fake head?
699 elif n in heads: # We've seen it before, is it a fake head?
702 # So it is, real heads should not be the ancestors of
700 # So it is, real heads should not be the ancestors of
703 # any other heads.
701 # any other heads.
704 heads.pop(n)
702 heads.pop(n)
705 if not ancestors:
703 if not ancestors:
706 return nonodes
704 return nonodes
707 # Now that we have our set of ancestors, we want to remove any
705 # Now that we have our set of ancestors, we want to remove any
708 # roots that are not ancestors.
706 # roots that are not ancestors.
709
707
710 # If one of the roots was nullid, everything is included anyway.
708 # If one of the roots was nullid, everything is included anyway.
711 if lowestrev > nullrev:
709 if lowestrev > nullrev:
712 # But, since we weren't, let's recompute the lowest rev to not
710 # But, since we weren't, let's recompute the lowest rev to not
713 # include roots that aren't ancestors.
711 # include roots that aren't ancestors.
714
712
715 # Filter out roots that aren't ancestors of heads
713 # Filter out roots that aren't ancestors of heads
716 roots = [n for n in roots if n in ancestors]
714 roots = [n for n in roots if n in ancestors]
717 # Recompute the lowest revision
715 # Recompute the lowest revision
718 if roots:
716 if roots:
719 lowestrev = min([self.rev(n) for n in roots])
717 lowestrev = min([self.rev(n) for n in roots])
720 else:
718 else:
721 # No more roots? Return empty list
719 # No more roots? Return empty list
722 return nonodes
720 return nonodes
723 else:
721 else:
724 # We are descending from nullid, and don't need to care about
722 # We are descending from nullid, and don't need to care about
725 # any other roots.
723 # any other roots.
726 lowestrev = nullrev
724 lowestrev = nullrev
727 roots = [nullid]
725 roots = [nullid]
728 # Transform our roots list into a set.
726 # Transform our roots list into a set.
729 descendents = set(roots)
727 descendents = set(roots)
730 # Also, keep the original roots so we can filter out roots that aren't
728 # Also, keep the original roots so we can filter out roots that aren't
731 # 'real' roots (i.e. are descended from other roots).
729 # 'real' roots (i.e. are descended from other roots).
732 roots = descendents.copy()
730 roots = descendents.copy()
733 # Our topologically sorted list of output nodes.
731 # Our topologically sorted list of output nodes.
734 orderedout = []
732 orderedout = []
735 # Don't start at nullid since we don't want nullid in our output list,
733 # Don't start at nullid since we don't want nullid in our output list,
736 # and if nullid shows up in descedents, empty parents will look like
734 # and if nullid shows up in descedents, empty parents will look like
737 # they're descendents.
735 # they're descendents.
738 for r in xrange(max(lowestrev, 0), highestrev + 1):
736 for r in xrange(max(lowestrev, 0), highestrev + 1):
739 n = self.node(r)
737 n = self.node(r)
740 isdescendent = False
738 isdescendent = False
741 if lowestrev == nullrev: # Everybody is a descendent of nullid
739 if lowestrev == nullrev: # Everybody is a descendent of nullid
742 isdescendent = True
740 isdescendent = True
743 elif n in descendents:
741 elif n in descendents:
744 # n is already a descendent
742 # n is already a descendent
745 isdescendent = True
743 isdescendent = True
746 # This check only needs to be done here because all the roots
744 # This check only needs to be done here because all the roots
747 # will start being marked is descendents before the loop.
745 # will start being marked is descendents before the loop.
748 if n in roots:
746 if n in roots:
749 # If n was a root, check if it's a 'real' root.
747 # If n was a root, check if it's a 'real' root.
750 p = tuple(self.parents(n))
748 p = tuple(self.parents(n))
751 # If any of its parents are descendents, it's not a root.
749 # If any of its parents are descendents, it's not a root.
752 if (p[0] in descendents) or (p[1] in descendents):
750 if (p[0] in descendents) or (p[1] in descendents):
753 roots.remove(n)
751 roots.remove(n)
754 else:
752 else:
755 p = tuple(self.parents(n))
753 p = tuple(self.parents(n))
756 # A node is a descendent if either of its parents are
754 # A node is a descendent if either of its parents are
757 # descendents. (We seeded the dependents list with the roots
755 # descendents. (We seeded the dependents list with the roots
758 # up there, remember?)
756 # up there, remember?)
759 if (p[0] in descendents) or (p[1] in descendents):
757 if (p[0] in descendents) or (p[1] in descendents):
760 descendents.add(n)
758 descendents.add(n)
761 isdescendent = True
759 isdescendent = True
762 if isdescendent and ((ancestors is None) or (n in ancestors)):
760 if isdescendent and ((ancestors is None) or (n in ancestors)):
763 # Only include nodes that are both descendents and ancestors.
761 # Only include nodes that are both descendents and ancestors.
764 orderedout.append(n)
762 orderedout.append(n)
765 if (ancestors is not None) and (n in heads):
763 if (ancestors is not None) and (n in heads):
766 # We're trying to figure out which heads are reachable
764 # We're trying to figure out which heads are reachable
767 # from roots.
765 # from roots.
768 # Mark this head as having been reached
766 # Mark this head as having been reached
769 heads[n] = 1
767 heads[n] = 1
770 elif ancestors is None:
768 elif ancestors is None:
771 # Otherwise, we're trying to discover the heads.
769 # Otherwise, we're trying to discover the heads.
772 # Assume this is a head because if it isn't, the next step
770 # Assume this is a head because if it isn't, the next step
773 # will eventually remove it.
771 # will eventually remove it.
774 heads[n] = 1
772 heads[n] = 1
775 # But, obviously its parents aren't.
773 # But, obviously its parents aren't.
776 for p in self.parents(n):
774 for p in self.parents(n):
777 heads.pop(p, None)
775 heads.pop(p, None)
778 heads = [n for n in heads.iterkeys() if heads[n] != 0]
776 heads = [n for n in heads.iterkeys() if heads[n] != 0]
779 roots = list(roots)
777 roots = list(roots)
780 assert orderedout
778 assert orderedout
781 assert roots
779 assert roots
782 assert heads
780 assert heads
783 return (orderedout, roots, heads)
781 return (orderedout, roots, heads)
784
782
785 def heads(self, start=None, stop=None):
783 def heads(self, start=None, stop=None):
786 """return the list of all nodes that have no children
784 """return the list of all nodes that have no children
787
785
788 if start is specified, only heads that are descendants of
786 if start is specified, only heads that are descendants of
789 start will be returned
787 start will be returned
790 if stop is specified, it will consider all the revs from stop
788 if stop is specified, it will consider all the revs from stop
791 as if they had no children
789 as if they had no children
792 """
790 """
793 if start is None and stop is None:
791 if start is None and stop is None:
794 count = len(self)
792 count = len(self)
795 if not count:
793 if not count:
796 return [nullid]
794 return [nullid]
797 ishead = [1] * (count + 1)
795 ishead = [1] * (count + 1)
798 index = self.index
796 index = self.index
799 for r in xrange(count):
797 for r in xrange(count):
800 e = index[r]
798 e = index[r]
801 ishead[e[5]] = ishead[e[6]] = 0
799 ishead[e[5]] = ishead[e[6]] = 0
802 return [self.node(r) for r in xrange(count) if ishead[r]]
800 return [self.node(r) for r in xrange(count) if ishead[r]]
803
801
804 if start is None:
802 if start is None:
805 start = nullid
803 start = nullid
806 if stop is None:
804 if stop is None:
807 stop = []
805 stop = []
808 stoprevs = set([self.rev(n) for n in stop])
806 stoprevs = set([self.rev(n) for n in stop])
809 startrev = self.rev(start)
807 startrev = self.rev(start)
810 reachable = {startrev: 1}
808 reachable = {startrev: 1}
811 heads = {startrev: 1}
809 heads = {startrev: 1}
812
810
813 parentrevs = self.parentrevs
811 parentrevs = self.parentrevs
814 for r in xrange(startrev + 1, len(self)):
812 for r in xrange(startrev + 1, len(self)):
815 for p in parentrevs(r):
813 for p in parentrevs(r):
816 if p in reachable:
814 if p in reachable:
817 if r not in stoprevs:
815 if r not in stoprevs:
818 reachable[r] = 1
816 reachable[r] = 1
819 heads[r] = 1
817 heads[r] = 1
820 if p in heads and p not in stoprevs:
818 if p in heads and p not in stoprevs:
821 del heads[p]
819 del heads[p]
822
820
823 return [self.node(r) for r in heads]
821 return [self.node(r) for r in heads]
824
822
825 def children(self, node):
823 def children(self, node):
826 """find the children of a given node"""
824 """find the children of a given node"""
827 c = []
825 c = []
828 p = self.rev(node)
826 p = self.rev(node)
829 for r in range(p + 1, len(self)):
827 for r in range(p + 1, len(self)):
830 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
828 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
831 if prevs:
829 if prevs:
832 for pr in prevs:
830 for pr in prevs:
833 if pr == p:
831 if pr == p:
834 c.append(self.node(r))
832 c.append(self.node(r))
835 elif p == nullrev:
833 elif p == nullrev:
836 c.append(self.node(r))
834 c.append(self.node(r))
837 return c
835 return c
838
836
839 def _match(self, id):
837 def _match(self, id):
840 if isinstance(id, (long, int)):
838 if isinstance(id, (long, int)):
841 # rev
839 # rev
842 return self.node(id)
840 return self.node(id)
843 if len(id) == 20:
841 if len(id) == 20:
844 # possibly a binary node
842 # possibly a binary node
845 # odds of a binary node being all hex in ASCII are 1 in 10**25
843 # odds of a binary node being all hex in ASCII are 1 in 10**25
846 try:
844 try:
847 node = id
845 node = id
848 self.rev(node) # quick search the index
846 self.rev(node) # quick search the index
849 return node
847 return node
850 except LookupError:
848 except LookupError:
851 pass # may be partial hex id
849 pass # may be partial hex id
852 try:
850 try:
853 # str(rev)
851 # str(rev)
854 rev = int(id)
852 rev = int(id)
855 if str(rev) != id:
853 if str(rev) != id:
856 raise ValueError
854 raise ValueError
857 if rev < 0:
855 if rev < 0:
858 rev = len(self) + rev
856 rev = len(self) + rev
859 if rev < 0 or rev >= len(self):
857 if rev < 0 or rev >= len(self):
860 raise ValueError
858 raise ValueError
861 return self.node(rev)
859 return self.node(rev)
862 except (ValueError, OverflowError):
860 except (ValueError, OverflowError):
863 pass
861 pass
864 if len(id) == 40:
862 if len(id) == 40:
865 try:
863 try:
866 # a full hex nodeid?
864 # a full hex nodeid?
867 node = bin(id)
865 node = bin(id)
868 self.rev(node)
866 self.rev(node)
869 return node
867 return node
870 except (TypeError, LookupError):
868 except (TypeError, LookupError):
871 pass
869 pass
872
870
873 def _partialmatch(self, id):
871 def _partialmatch(self, id):
874 if len(id) < 40:
872 if len(id) < 40:
875 try:
873 try:
876 # hex(node)[:...]
874 # hex(node)[:...]
877 l = len(id) / 2 # grab an even number of digits
875 l = len(id) / 2 # grab an even number of digits
878 bin_id = bin(id[:l*2])
876 bin_id = bin(id[:l*2])
879 nl = [n for n in self.nodemap if n[:l] == bin_id]
877 nl = [n for n in self.nodemap if n[:l] == bin_id]
880 nl = [n for n in nl if hex(n).startswith(id)]
878 nl = [n for n in nl if hex(n).startswith(id)]
881 if len(nl) > 0:
879 if len(nl) > 0:
882 if len(nl) == 1:
880 if len(nl) == 1:
883 return nl[0]
881 return nl[0]
884 raise LookupError(id, self.indexfile,
882 raise LookupError(id, self.indexfile,
885 _('ambiguous identifier'))
883 _('ambiguous identifier'))
886 return None
884 return None
887 except TypeError:
885 except TypeError:
888 pass
886 pass
889
887
890 def lookup(self, id):
888 def lookup(self, id):
891 """locate a node based on:
889 """locate a node based on:
892 - revision number or str(revision number)
890 - revision number or str(revision number)
893 - nodeid or subset of hex nodeid
891 - nodeid or subset of hex nodeid
894 """
892 """
895 n = self._match(id)
893 n = self._match(id)
896 if n is not None:
894 if n is not None:
897 return n
895 return n
898 n = self._partialmatch(id)
896 n = self._partialmatch(id)
899 if n:
897 if n:
900 return n
898 return n
901
899
902 raise LookupError(id, self.indexfile, _('no match found'))
900 raise LookupError(id, self.indexfile, _('no match found'))
903
901
904 def cmp(self, node, text):
902 def cmp(self, node, text):
905 """compare text with a given file revision"""
903 """compare text with a given file revision"""
906 p1, p2 = self.parents(node)
904 p1, p2 = self.parents(node)
907 return hash(text, p1, p2) != node
905 return hash(text, p1, p2) != node
908
906
909 def chunk(self, rev, df=None):
907 def chunk(self, rev, df=None):
910 def loadcache(df):
908 def loadcache(df):
911 if not df:
909 if not df:
912 if self._inline:
910 if self._inline:
913 df = self.opener(self.indexfile)
911 df = self.opener(self.indexfile)
914 else:
912 else:
915 df = self.opener(self.datafile)
913 df = self.opener(self.datafile)
916 df.seek(start)
914 df.seek(start)
917 self._chunkcache = (start, df.read(cache_length))
915 self._chunkcache = (start, df.read(cache_length))
918
916
919 start, length = self.start(rev), self.length(rev)
917 start, length = self.start(rev), self.length(rev)
920 if self._inline:
918 if self._inline:
921 start += (rev + 1) * self._io.size
919 start += (rev + 1) * self._io.size
922 end = start + length
920 end = start + length
923
921
924 offset = 0
922 offset = 0
925 if not self._chunkcache:
923 if not self._chunkcache:
926 cache_length = max(65536, length)
924 cache_length = max(65536, length)
927 loadcache(df)
925 loadcache(df)
928 else:
926 else:
929 cache_start = self._chunkcache[0]
927 cache_start = self._chunkcache[0]
930 cache_length = len(self._chunkcache[1])
928 cache_length = len(self._chunkcache[1])
931 cache_end = cache_start + cache_length
929 cache_end = cache_start + cache_length
932 if start >= cache_start and end <= cache_end:
930 if start >= cache_start and end <= cache_end:
933 # it is cached
931 # it is cached
934 offset = start - cache_start
932 offset = start - cache_start
935 else:
933 else:
936 cache_length = max(65536, length)
934 cache_length = max(65536, length)
937 loadcache(df)
935 loadcache(df)
938
936
939 # avoid copying large chunks
937 # avoid copying large chunks
940 c = self._chunkcache[1]
938 c = self._chunkcache[1]
941 if cache_length != length:
939 if cache_length != length:
942 c = c[offset:offset + length]
940 c = c[offset:offset + length]
943
941
944 return decompress(c)
942 return decompress(c)
945
943
946 def revdiff(self, rev1, rev2):
944 def revdiff(self, rev1, rev2):
947 """return or calculate a delta between two revisions"""
945 """return or calculate a delta between two revisions"""
948 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
946 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
949 return self.chunk(rev2)
947 return self.chunk(rev2)
950
948
951 return mdiff.textdiff(self.revision(self.node(rev1)),
949 return mdiff.textdiff(self.revision(self.node(rev1)),
952 self.revision(self.node(rev2)))
950 self.revision(self.node(rev2)))
953
951
954 def revision(self, node):
952 def revision(self, node):
955 """return an uncompressed revision of a given node"""
953 """return an uncompressed revision of a given node"""
956 if node == nullid:
954 if node == nullid:
957 return ""
955 return ""
958 if self._cache and self._cache[0] == node:
956 if self._cache and self._cache[0] == node:
959 return str(self._cache[2])
957 return str(self._cache[2])
960
958
961 # look up what we need to read
959 # look up what we need to read
962 text = None
960 text = None
963 rev = self.rev(node)
961 rev = self.rev(node)
964 base = self.base(rev)
962 base = self.base(rev)
965
963
966 # check rev flags
964 # check rev flags
967 if self.index[rev][0] & 0xFFFF:
965 if self.index[rev][0] & 0xFFFF:
968 raise RevlogError(_('incompatible revision flag %x') %
966 raise RevlogError(_('incompatible revision flag %x') %
969 (self.index[rev][0] & 0xFFFF))
967 (self.index[rev][0] & 0xFFFF))
970
968
971 df = None
969 df = None
972
970
973 # do we have useful data cached?
971 # do we have useful data cached?
974 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
972 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
975 base = self._cache[1]
973 base = self._cache[1]
976 text = str(self._cache[2])
974 text = str(self._cache[2])
977 self._loadindex(base, rev + 1)
975 self._loadindex(base, rev + 1)
978 if not self._inline and rev > base + 1:
976 if not self._inline and rev > base + 1:
979 df = self.opener(self.datafile)
977 df = self.opener(self.datafile)
980 else:
978 else:
981 self._loadindex(base, rev + 1)
979 self._loadindex(base, rev + 1)
982 if not self._inline and rev > base:
980 if not self._inline and rev > base:
983 df = self.opener(self.datafile)
981 df = self.opener(self.datafile)
984 text = self.chunk(base, df=df)
982 text = self.chunk(base, df=df)
985
983
986 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
984 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
987 text = mdiff.patches(text, bins)
985 text = mdiff.patches(text, bins)
988 p1, p2 = self.parents(node)
986 p1, p2 = self.parents(node)
989 if node != hash(text, p1, p2):
987 if node != hash(text, p1, p2):
990 raise RevlogError(_("integrity check failed on %s:%d")
988 raise RevlogError(_("integrity check failed on %s:%d")
991 % (self.datafile, rev))
989 % (self.datafile, rev))
992
990
993 self._cache = (node, rev, text)
991 self._cache = (node, rev, text)
994 return text
992 return text
995
993
996 def checkinlinesize(self, tr, fp=None):
994 def checkinlinesize(self, tr, fp=None):
997 if not self._inline:
995 if not self._inline:
998 return
996 return
999 if not fp:
997 if not fp:
1000 fp = self.opener(self.indexfile, 'r')
998 fp = self.opener(self.indexfile, 'r')
1001 fp.seek(0, 2)
999 fp.seek(0, 2)
1002 size = fp.tell()
1000 size = fp.tell()
1003 if size < 131072:
1001 if size < 131072:
1004 return
1002 return
1005 trinfo = tr.find(self.indexfile)
1003 trinfo = tr.find(self.indexfile)
1006 if trinfo == None:
1004 if trinfo == None:
1007 raise RevlogError(_("%s not found in the transaction")
1005 raise RevlogError(_("%s not found in the transaction")
1008 % self.indexfile)
1006 % self.indexfile)
1009
1007
1010 trindex = trinfo[2]
1008 trindex = trinfo[2]
1011 dataoff = self.start(trindex)
1009 dataoff = self.start(trindex)
1012
1010
1013 tr.add(self.datafile, dataoff)
1011 tr.add(self.datafile, dataoff)
1014 df = self.opener(self.datafile, 'w')
1012 df = self.opener(self.datafile, 'w')
1015 try:
1013 try:
1016 calc = self._io.size
1014 calc = self._io.size
1017 for r in self:
1015 for r in self:
1018 start = self.start(r) + (r + 1) * calc
1016 start = self.start(r) + (r + 1) * calc
1019 length = self.length(r)
1017 length = self.length(r)
1020 fp.seek(start)
1018 fp.seek(start)
1021 d = fp.read(length)
1019 d = fp.read(length)
1022 df.write(d)
1020 df.write(d)
1023 finally:
1021 finally:
1024 df.close()
1022 df.close()
1025
1023
1026 fp.close()
1024 fp.close()
1027 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1025 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1028 self.version &= ~(REVLOGNGINLINEDATA)
1026 self.version &= ~(REVLOGNGINLINEDATA)
1029 self._inline = False
1027 self._inline = False
1030 for i in self:
1028 for i in self:
1031 e = self._io.packentry(self.index[i], self.node, self.version, i)
1029 e = self._io.packentry(self.index[i], self.node, self.version, i)
1032 fp.write(e)
1030 fp.write(e)
1033
1031
1034 # if we don't call rename, the temp file will never replace the
1032 # if we don't call rename, the temp file will never replace the
1035 # real index
1033 # real index
1036 fp.rename()
1034 fp.rename()
1037
1035
1038 tr.replace(self.indexfile, trindex * calc)
1036 tr.replace(self.indexfile, trindex * calc)
1039 self._chunkcache = None
1037 self._chunkcache = None
1040
1038
1041 def addrevision(self, text, transaction, link, p1, p2, d=None):
1039 def addrevision(self, text, transaction, link, p1, p2, d=None):
1042 """add a revision to the log
1040 """add a revision to the log
1043
1041
1044 text - the revision data to add
1042 text - the revision data to add
1045 transaction - the transaction object used for rollback
1043 transaction - the transaction object used for rollback
1046 link - the linkrev data to add
1044 link - the linkrev data to add
1047 p1, p2 - the parent nodeids of the revision
1045 p1, p2 - the parent nodeids of the revision
1048 d - an optional precomputed delta
1046 d - an optional precomputed delta
1049 """
1047 """
1050 dfh = None
1048 dfh = None
1051 if not self._inline:
1049 if not self._inline:
1052 dfh = self.opener(self.datafile, "a")
1050 dfh = self.opener(self.datafile, "a")
1053 ifh = self.opener(self.indexfile, "a+")
1051 ifh = self.opener(self.indexfile, "a+")
1054 try:
1052 try:
1055 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1053 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1056 finally:
1054 finally:
1057 if dfh:
1055 if dfh:
1058 dfh.close()
1056 dfh.close()
1059 ifh.close()
1057 ifh.close()
1060
1058
1061 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1059 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1062 node = hash(text, p1, p2)
1060 node = hash(text, p1, p2)
1063 if node in self.nodemap:
1061 if node in self.nodemap:
1064 return node
1062 return node
1065
1063
1066 curr = len(self)
1064 curr = len(self)
1067 prev = curr - 1
1065 prev = curr - 1
1068 base = self.base(prev)
1066 base = self.base(prev)
1069 offset = self.end(prev)
1067 offset = self.end(prev)
1070
1068
1071 if curr:
1069 if curr:
1072 if not d:
1070 if not d:
1073 ptext = self.revision(self.node(prev))
1071 ptext = self.revision(self.node(prev))
1074 d = mdiff.textdiff(ptext, text)
1072 d = mdiff.textdiff(ptext, text)
1075 data = compress(d)
1073 data = compress(d)
1076 l = len(data[1]) + len(data[0])
1074 l = len(data[1]) + len(data[0])
1077 dist = l + offset - self.start(base)
1075 dist = l + offset - self.start(base)
1078
1076
1079 # full versions are inserted when the needed deltas
1077 # full versions are inserted when the needed deltas
1080 # become comparable to the uncompressed text
1078 # become comparable to the uncompressed text
1081 if not curr or dist > len(text) * 2:
1079 if not curr or dist > len(text) * 2:
1082 data = compress(text)
1080 data = compress(text)
1083 l = len(data[1]) + len(data[0])
1081 l = len(data[1]) + len(data[0])
1084 base = curr
1082 base = curr
1085
1083
1086 e = (offset_type(offset, 0), l, len(text),
1084 e = (offset_type(offset, 0), l, len(text),
1087 base, link, self.rev(p1), self.rev(p2), node)
1085 base, link, self.rev(p1), self.rev(p2), node)
1088 self.index.insert(-1, e)
1086 self.index.insert(-1, e)
1089 self.nodemap[node] = curr
1087 self.nodemap[node] = curr
1090
1088
1091 entry = self._io.packentry(e, self.node, self.version, curr)
1089 entry = self._io.packentry(e, self.node, self.version, curr)
1092 if not self._inline:
1090 if not self._inline:
1093 transaction.add(self.datafile, offset)
1091 transaction.add(self.datafile, offset)
1094 transaction.add(self.indexfile, curr * len(entry))
1092 transaction.add(self.indexfile, curr * len(entry))
1095 if data[0]:
1093 if data[0]:
1096 dfh.write(data[0])
1094 dfh.write(data[0])
1097 dfh.write(data[1])
1095 dfh.write(data[1])
1098 dfh.flush()
1096 dfh.flush()
1099 ifh.write(entry)
1097 ifh.write(entry)
1100 else:
1098 else:
1101 offset += curr * self._io.size
1099 offset += curr * self._io.size
1102 transaction.add(self.indexfile, offset, curr)
1100 transaction.add(self.indexfile, offset, curr)
1103 ifh.write(entry)
1101 ifh.write(entry)
1104 ifh.write(data[0])
1102 ifh.write(data[0])
1105 ifh.write(data[1])
1103 ifh.write(data[1])
1106 self.checkinlinesize(transaction, ifh)
1104 self.checkinlinesize(transaction, ifh)
1107
1105
1108 self._cache = (node, curr, text)
1106 self._cache = (node, curr, text)
1109 return node
1107 return node
1110
1108
1111 def ancestor(self, a, b):
1109 def ancestor(self, a, b):
1112 """calculate the least common ancestor of nodes a and b"""
1110 """calculate the least common ancestor of nodes a and b"""
1113
1111
1114 def parents(rev):
1112 def parents(rev):
1115 return [p for p in self.parentrevs(rev) if p != nullrev]
1113 return [p for p in self.parentrevs(rev) if p != nullrev]
1116
1114
1117 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1115 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1118 if c is None:
1116 if c is None:
1119 return nullid
1117 return nullid
1120
1118
1121 return self.node(c)
1119 return self.node(c)
1122
1120
1123 def group(self, nodelist, lookup, infocollect=None):
1121 def group(self, nodelist, lookup, infocollect=None):
1124 """calculate a delta group
1122 """calculate a delta group
1125
1123
1126 Given a list of changeset revs, return a set of deltas and
1124 Given a list of changeset revs, return a set of deltas and
1127 metadata corresponding to nodes. the first delta is
1125 metadata corresponding to nodes. the first delta is
1128 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1126 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1129 have this parent as it has all history before these
1127 have this parent as it has all history before these
1130 changesets. parent is parent[0]
1128 changesets. parent is parent[0]
1131 """
1129 """
1132 revs = [self.rev(n) for n in nodelist]
1130 revs = [self.rev(n) for n in nodelist]
1133
1131
1134 # if we don't have any revisions touched by these changesets, bail
1132 # if we don't have any revisions touched by these changesets, bail
1135 if not revs:
1133 if not revs:
1136 yield changegroup.closechunk()
1134 yield changegroup.closechunk()
1137 return
1135 return
1138
1136
1139 # add the parent of the first rev
1137 # add the parent of the first rev
1140 p = self.parents(self.node(revs[0]))[0]
1138 p = self.parents(self.node(revs[0]))[0]
1141 revs.insert(0, self.rev(p))
1139 revs.insert(0, self.rev(p))
1142
1140
1143 # build deltas
1141 # build deltas
1144 for d in xrange(0, len(revs) - 1):
1142 for d in xrange(0, len(revs) - 1):
1145 a, b = revs[d], revs[d + 1]
1143 a, b = revs[d], revs[d + 1]
1146 nb = self.node(b)
1144 nb = self.node(b)
1147
1145
1148 if infocollect is not None:
1146 if infocollect is not None:
1149 infocollect(nb)
1147 infocollect(nb)
1150
1148
1151 p = self.parents(nb)
1149 p = self.parents(nb)
1152 meta = nb + p[0] + p[1] + lookup(nb)
1150 meta = nb + p[0] + p[1] + lookup(nb)
1153 if a == -1:
1151 if a == -1:
1154 d = self.revision(nb)
1152 d = self.revision(nb)
1155 meta += mdiff.trivialdiffheader(len(d))
1153 meta += mdiff.trivialdiffheader(len(d))
1156 else:
1154 else:
1157 d = self.revdiff(a, b)
1155 d = self.revdiff(a, b)
1158 yield changegroup.chunkheader(len(meta) + len(d))
1156 yield changegroup.chunkheader(len(meta) + len(d))
1159 yield meta
1157 yield meta
1160 if len(d) > 2**20:
1158 if len(d) > 2**20:
1161 pos = 0
1159 pos = 0
1162 while pos < len(d):
1160 while pos < len(d):
1163 pos2 = pos + 2 ** 18
1161 pos2 = pos + 2 ** 18
1164 yield d[pos:pos2]
1162 yield d[pos:pos2]
1165 pos = pos2
1163 pos = pos2
1166 else:
1164 else:
1167 yield d
1165 yield d
1168
1166
1169 yield changegroup.closechunk()
1167 yield changegroup.closechunk()
1170
1168
1171 def addgroup(self, revs, linkmapper, transaction):
1169 def addgroup(self, revs, linkmapper, transaction):
1172 """
1170 """
1173 add a delta group
1171 add a delta group
1174
1172
1175 given a set of deltas, add them to the revision log. the
1173 given a set of deltas, add them to the revision log. the
1176 first delta is against its parent, which should be in our
1174 first delta is against its parent, which should be in our
1177 log, the rest are against the previous delta.
1175 log, the rest are against the previous delta.
1178 """
1176 """
1179
1177
1180 #track the base of the current delta log
1178 #track the base of the current delta log
1181 r = len(self)
1179 r = len(self)
1182 t = r - 1
1180 t = r - 1
1183 node = None
1181 node = None
1184
1182
1185 base = prev = nullrev
1183 base = prev = nullrev
1186 start = end = textlen = 0
1184 start = end = textlen = 0
1187 if r:
1185 if r:
1188 end = self.end(t)
1186 end = self.end(t)
1189
1187
1190 ifh = self.opener(self.indexfile, "a+")
1188 ifh = self.opener(self.indexfile, "a+")
1191 isize = r * self._io.size
1189 isize = r * self._io.size
1192 if self._inline:
1190 if self._inline:
1193 transaction.add(self.indexfile, end + isize, r)
1191 transaction.add(self.indexfile, end + isize, r)
1194 dfh = None
1192 dfh = None
1195 else:
1193 else:
1196 transaction.add(self.indexfile, isize, r)
1194 transaction.add(self.indexfile, isize, r)
1197 transaction.add(self.datafile, end)
1195 transaction.add(self.datafile, end)
1198 dfh = self.opener(self.datafile, "a")
1196 dfh = self.opener(self.datafile, "a")
1199
1197
1200 try:
1198 try:
1201 # loop through our set of deltas
1199 # loop through our set of deltas
1202 chain = None
1200 chain = None
1203 for chunk in revs:
1201 for chunk in revs:
1204 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1202 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1205 link = linkmapper(cs)
1203 link = linkmapper(cs)
1206 if node in self.nodemap:
1204 if node in self.nodemap:
1207 # this can happen if two branches make the same change
1205 # this can happen if two branches make the same change
1208 chain = node
1206 chain = node
1209 continue
1207 continue
1210 delta = buffer(chunk, 80)
1208 delta = buffer(chunk, 80)
1211 del chunk
1209 del chunk
1212
1210
1213 for p in (p1, p2):
1211 for p in (p1, p2):
1214 if not p in self.nodemap:
1212 if not p in self.nodemap:
1215 raise LookupError(p, self.indexfile, _('unknown parent'))
1213 raise LookupError(p, self.indexfile, _('unknown parent'))
1216
1214
1217 if not chain:
1215 if not chain:
1218 # retrieve the parent revision of the delta chain
1216 # retrieve the parent revision of the delta chain
1219 chain = p1
1217 chain = p1
1220 if not chain in self.nodemap:
1218 if not chain in self.nodemap:
1221 raise LookupError(chain, self.indexfile, _('unknown base'))
1219 raise LookupError(chain, self.indexfile, _('unknown base'))
1222
1220
1223 # full versions are inserted when the needed deltas become
1221 # full versions are inserted when the needed deltas become
1224 # comparable to the uncompressed text or when the previous
1222 # comparable to the uncompressed text or when the previous
1225 # version is not the one we have a delta against. We use
1223 # version is not the one we have a delta against. We use
1226 # the size of the previous full rev as a proxy for the
1224 # the size of the previous full rev as a proxy for the
1227 # current size.
1225 # current size.
1228
1226
1229 if chain == prev:
1227 if chain == prev:
1230 cdelta = compress(delta)
1228 cdelta = compress(delta)
1231 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1229 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1232 textlen = mdiff.patchedsize(textlen, delta)
1230 textlen = mdiff.patchedsize(textlen, delta)
1233
1231
1234 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1232 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1235 # flush our writes here so we can read it in revision
1233 # flush our writes here so we can read it in revision
1236 if dfh:
1234 if dfh:
1237 dfh.flush()
1235 dfh.flush()
1238 ifh.flush()
1236 ifh.flush()
1239 text = self.revision(chain)
1237 text = self.revision(chain)
1240 if len(text) == 0:
1238 if len(text) == 0:
1241 # skip over trivial delta header
1239 # skip over trivial delta header
1242 text = buffer(delta, 12)
1240 text = buffer(delta, 12)
1243 else:
1241 else:
1244 text = mdiff.patches(text, [delta])
1242 text = mdiff.patches(text, [delta])
1245 del delta
1243 del delta
1246 chk = self._addrevision(text, transaction, link, p1, p2, None,
1244 chk = self._addrevision(text, transaction, link, p1, p2, None,
1247 ifh, dfh)
1245 ifh, dfh)
1248 if not dfh and not self._inline:
1246 if not dfh and not self._inline:
1249 # addrevision switched from inline to conventional
1247 # addrevision switched from inline to conventional
1250 # reopen the index
1248 # reopen the index
1251 dfh = self.opener(self.datafile, "a")
1249 dfh = self.opener(self.datafile, "a")
1252 ifh = self.opener(self.indexfile, "a")
1250 ifh = self.opener(self.indexfile, "a")
1253 if chk != node:
1251 if chk != node:
1254 raise RevlogError(_("consistency error adding group"))
1252 raise RevlogError(_("consistency error adding group"))
1255 textlen = len(text)
1253 textlen = len(text)
1256 else:
1254 else:
1257 e = (offset_type(end, 0), cdeltalen, textlen, base,
1255 e = (offset_type(end, 0), cdeltalen, textlen, base,
1258 link, self.rev(p1), self.rev(p2), node)
1256 link, self.rev(p1), self.rev(p2), node)
1259 self.index.insert(-1, e)
1257 self.index.insert(-1, e)
1260 self.nodemap[node] = r
1258 self.nodemap[node] = r
1261 entry = self._io.packentry(e, self.node, self.version, r)
1259 entry = self._io.packentry(e, self.node, self.version, r)
1262 if self._inline:
1260 if self._inline:
1263 ifh.write(entry)
1261 ifh.write(entry)
1264 ifh.write(cdelta[0])
1262 ifh.write(cdelta[0])
1265 ifh.write(cdelta[1])
1263 ifh.write(cdelta[1])
1266 self.checkinlinesize(transaction, ifh)
1264 self.checkinlinesize(transaction, ifh)
1267 if not self._inline:
1265 if not self._inline:
1268 dfh = self.opener(self.datafile, "a")
1266 dfh = self.opener(self.datafile, "a")
1269 ifh = self.opener(self.indexfile, "a")
1267 ifh = self.opener(self.indexfile, "a")
1270 else:
1268 else:
1271 dfh.write(cdelta[0])
1269 dfh.write(cdelta[0])
1272 dfh.write(cdelta[1])
1270 dfh.write(cdelta[1])
1273 ifh.write(entry)
1271 ifh.write(entry)
1274
1272
1275 t, r, chain, prev = r, r + 1, node, node
1273 t, r, chain, prev = r, r + 1, node, node
1276 base = self.base(t)
1274 base = self.base(t)
1277 start = self.start(base)
1275 start = self.start(base)
1278 end = self.end(t)
1276 end = self.end(t)
1279 finally:
1277 finally:
1280 if dfh:
1278 if dfh:
1281 dfh.close()
1279 dfh.close()
1282 ifh.close()
1280 ifh.close()
1283
1281
1284 return node
1282 return node
1285
1283
1286 def strip(self, minlink, transaction):
1284 def strip(self, minlink, transaction):
1287 """truncate the revlog on the first revision with a linkrev >= minlink
1285 """truncate the revlog on the first revision with a linkrev >= minlink
1288
1286
1289 This function is called when we're stripping revision minlink and
1287 This function is called when we're stripping revision minlink and
1290 its descendants from the repository.
1288 its descendants from the repository.
1291
1289
1292 We have to remove all revisions with linkrev >= minlink, because
1290 We have to remove all revisions with linkrev >= minlink, because
1293 the equivalent changelog revisions will be renumbered after the
1291 the equivalent changelog revisions will be renumbered after the
1294 strip.
1292 strip.
1295
1293
1296 So we truncate the revlog on the first of these revisions, and
1294 So we truncate the revlog on the first of these revisions, and
1297 trust that the caller has saved the revisions that shouldn't be
1295 trust that the caller has saved the revisions that shouldn't be
1298 removed and that it'll readd them after this truncation.
1296 removed and that it'll readd them after this truncation.
1299 """
1297 """
1300 if len(self) == 0:
1298 if len(self) == 0:
1301 return
1299 return
1302
1300
1303 if isinstance(self.index, lazyindex):
1301 if isinstance(self.index, lazyindex):
1304 self._loadindexmap()
1302 self._loadindexmap()
1305
1303
1306 for rev in self:
1304 for rev in self:
1307 if self.index[rev][4] >= minlink:
1305 if self.index[rev][4] >= minlink:
1308 break
1306 break
1309 else:
1307 else:
1310 return
1308 return
1311
1309
1312 # first truncate the files on disk
1310 # first truncate the files on disk
1313 end = self.start(rev)
1311 end = self.start(rev)
1314 if not self._inline:
1312 if not self._inline:
1315 transaction.add(self.datafile, end)
1313 transaction.add(self.datafile, end)
1316 end = rev * self._io.size
1314 end = rev * self._io.size
1317 else:
1315 else:
1318 end += rev * self._io.size
1316 end += rev * self._io.size
1319
1317
1320 transaction.add(self.indexfile, end)
1318 transaction.add(self.indexfile, end)
1321
1319
1322 # then reset internal state in memory to forget those revisions
1320 # then reset internal state in memory to forget those revisions
1323 self._cache = None
1321 self._cache = None
1324 self._chunkcache = None
1322 self._chunkcache = None
1325 for x in xrange(rev, len(self)):
1323 for x in xrange(rev, len(self)):
1326 del self.nodemap[self.node(x)]
1324 del self.nodemap[self.node(x)]
1327
1325
1328 del self.index[rev:-1]
1326 del self.index[rev:-1]
1329
1327
1330 def checksize(self):
1328 def checksize(self):
1331 expected = 0
1329 expected = 0
1332 if len(self):
1330 if len(self):
1333 expected = max(0, self.end(len(self) - 1))
1331 expected = max(0, self.end(len(self) - 1))
1334
1332
1335 try:
1333 try:
1336 f = self.opener(self.datafile)
1334 f = self.opener(self.datafile)
1337 f.seek(0, 2)
1335 f.seek(0, 2)
1338 actual = f.tell()
1336 actual = f.tell()
1339 dd = actual - expected
1337 dd = actual - expected
1340 except IOError, inst:
1338 except IOError, inst:
1341 if inst.errno != errno.ENOENT:
1339 if inst.errno != errno.ENOENT:
1342 raise
1340 raise
1343 dd = 0
1341 dd = 0
1344
1342
1345 try:
1343 try:
1346 f = self.opener(self.indexfile)
1344 f = self.opener(self.indexfile)
1347 f.seek(0, 2)
1345 f.seek(0, 2)
1348 actual = f.tell()
1346 actual = f.tell()
1349 s = self._io.size
1347 s = self._io.size
1350 i = max(0, actual / s)
1348 i = max(0, actual / s)
1351 di = actual - (i * s)
1349 di = actual - (i * s)
1352 if self._inline:
1350 if self._inline:
1353 databytes = 0
1351 databytes = 0
1354 for r in self:
1352 for r in self:
1355 databytes += max(0, self.length(r))
1353 databytes += max(0, self.length(r))
1356 dd = 0
1354 dd = 0
1357 di = actual - len(self) * s - databytes
1355 di = actual - len(self) * s - databytes
1358 except IOError, inst:
1356 except IOError, inst:
1359 if inst.errno != errno.ENOENT:
1357 if inst.errno != errno.ENOENT:
1360 raise
1358 raise
1361 di = 0
1359 di = 0
1362
1360
1363 return (dd, di)
1361 return (dd, di)
1364
1362
1365 def files(self):
1363 def files(self):
1366 res = [ self.indexfile ]
1364 res = [ self.indexfile ]
1367 if not self._inline:
1365 if not self._inline:
1368 res.append(self.datafile)
1366 res.append(self.datafile)
1369 return res
1367 return res
@@ -1,1477 +1,1475 b''
1 """
1 # util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
2 #
3
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
7
7 # This software may be used and distributed according to the terms of the
8 This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2, incorporated herein by reference.
9 GNU General Public License version 2, incorporated herein by reference.
9 #
10
10 # This contains helper routines that are independent of the SCM core and hide
11 This contains helper routines that are independent of the SCM core and hide
11 # platform-specific details from the core.
12 platform-specific details from the core.
13 """
14
12
15 from i18n import _
13 from i18n import _
16 import cStringIO, errno, re, shutil, sys, tempfile, traceback, error
14 import cStringIO, errno, re, shutil, sys, tempfile, traceback, error
17 import os, stat, threading, time, calendar, glob, osutil
15 import os, stat, threading, time, calendar, glob, osutil
18 import imp
16 import imp
19
17
20 # Python compatibility
18 # Python compatibility
21
19
22 _md5 = None
20 _md5 = None
23 def md5(s):
21 def md5(s):
24 global _md5
22 global _md5
25 if _md5 is None:
23 if _md5 is None:
26 try:
24 try:
27 import hashlib
25 import hashlib
28 _md5 = hashlib.md5
26 _md5 = hashlib.md5
29 except ImportError:
27 except ImportError:
30 import md5
28 import md5
31 _md5 = md5.md5
29 _md5 = md5.md5
32 return _md5(s)
30 return _md5(s)
33
31
34 _sha1 = None
32 _sha1 = None
35 def sha1(s):
33 def sha1(s):
36 global _sha1
34 global _sha1
37 if _sha1 is None:
35 if _sha1 is None:
38 try:
36 try:
39 import hashlib
37 import hashlib
40 _sha1 = hashlib.sha1
38 _sha1 = hashlib.sha1
41 except ImportError:
39 except ImportError:
42 import sha
40 import sha
43 _sha1 = sha.sha
41 _sha1 = sha.sha
44 return _sha1(s)
42 return _sha1(s)
45
43
46 try:
44 try:
47 import subprocess
45 import subprocess
48 subprocess.Popen # trigger ImportError early
46 subprocess.Popen # trigger ImportError early
49 closefds = os.name == 'posix'
47 closefds = os.name == 'posix'
50 def popen2(cmd, mode='t', bufsize=-1):
48 def popen2(cmd, mode='t', bufsize=-1):
51 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
49 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
52 close_fds=closefds,
50 close_fds=closefds,
53 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
51 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
54 return p.stdin, p.stdout
52 return p.stdin, p.stdout
55 def popen3(cmd, mode='t', bufsize=-1):
53 def popen3(cmd, mode='t', bufsize=-1):
56 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
54 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
57 close_fds=closefds,
55 close_fds=closefds,
58 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
56 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
59 stderr=subprocess.PIPE)
57 stderr=subprocess.PIPE)
60 return p.stdin, p.stdout, p.stderr
58 return p.stdin, p.stdout, p.stderr
61 def Popen3(cmd, capturestderr=False, bufsize=-1):
59 def Popen3(cmd, capturestderr=False, bufsize=-1):
62 stderr = capturestderr and subprocess.PIPE or None
60 stderr = capturestderr and subprocess.PIPE or None
63 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
61 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
64 close_fds=closefds,
62 close_fds=closefds,
65 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
63 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
66 stderr=stderr)
64 stderr=stderr)
67 p.fromchild = p.stdout
65 p.fromchild = p.stdout
68 p.tochild = p.stdin
66 p.tochild = p.stdin
69 p.childerr = p.stderr
67 p.childerr = p.stderr
70 return p
68 return p
71 except ImportError:
69 except ImportError:
72 subprocess = None
70 subprocess = None
73 from popen2 import Popen3
71 from popen2 import Popen3
74 popen2 = os.popen2
72 popen2 = os.popen2
75 popen3 = os.popen3
73 popen3 = os.popen3
76
74
77
75
78 def version():
76 def version():
79 """Return version information if available."""
77 """Return version information if available."""
80 try:
78 try:
81 import __version__
79 import __version__
82 return __version__.version
80 return __version__.version
83 except ImportError:
81 except ImportError:
84 return 'unknown'
82 return 'unknown'
85
83
86 # used by parsedate
84 # used by parsedate
87 defaultdateformats = (
85 defaultdateformats = (
88 '%Y-%m-%d %H:%M:%S',
86 '%Y-%m-%d %H:%M:%S',
89 '%Y-%m-%d %I:%M:%S%p',
87 '%Y-%m-%d %I:%M:%S%p',
90 '%Y-%m-%d %H:%M',
88 '%Y-%m-%d %H:%M',
91 '%Y-%m-%d %I:%M%p',
89 '%Y-%m-%d %I:%M%p',
92 '%Y-%m-%d',
90 '%Y-%m-%d',
93 '%m-%d',
91 '%m-%d',
94 '%m/%d',
92 '%m/%d',
95 '%m/%d/%y',
93 '%m/%d/%y',
96 '%m/%d/%Y',
94 '%m/%d/%Y',
97 '%a %b %d %H:%M:%S %Y',
95 '%a %b %d %H:%M:%S %Y',
98 '%a %b %d %I:%M:%S%p %Y',
96 '%a %b %d %I:%M:%S%p %Y',
99 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
97 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
100 '%b %d %H:%M:%S %Y',
98 '%b %d %H:%M:%S %Y',
101 '%b %d %I:%M:%S%p %Y',
99 '%b %d %I:%M:%S%p %Y',
102 '%b %d %H:%M:%S',
100 '%b %d %H:%M:%S',
103 '%b %d %I:%M:%S%p',
101 '%b %d %I:%M:%S%p',
104 '%b %d %H:%M',
102 '%b %d %H:%M',
105 '%b %d %I:%M%p',
103 '%b %d %I:%M%p',
106 '%b %d %Y',
104 '%b %d %Y',
107 '%b %d',
105 '%b %d',
108 '%H:%M:%S',
106 '%H:%M:%S',
109 '%I:%M:%SP',
107 '%I:%M:%SP',
110 '%H:%M',
108 '%H:%M',
111 '%I:%M%p',
109 '%I:%M%p',
112 )
110 )
113
111
114 extendeddateformats = defaultdateformats + (
112 extendeddateformats = defaultdateformats + (
115 "%Y",
113 "%Y",
116 "%Y-%m",
114 "%Y-%m",
117 "%b",
115 "%b",
118 "%b %Y",
116 "%b %Y",
119 )
117 )
120
118
121 def cachefunc(func):
119 def cachefunc(func):
122 '''cache the result of function calls'''
120 '''cache the result of function calls'''
123 # XXX doesn't handle keywords args
121 # XXX doesn't handle keywords args
124 cache = {}
122 cache = {}
125 if func.func_code.co_argcount == 1:
123 if func.func_code.co_argcount == 1:
126 # we gain a small amount of time because
124 # we gain a small amount of time because
127 # we don't need to pack/unpack the list
125 # we don't need to pack/unpack the list
128 def f(arg):
126 def f(arg):
129 if arg not in cache:
127 if arg not in cache:
130 cache[arg] = func(arg)
128 cache[arg] = func(arg)
131 return cache[arg]
129 return cache[arg]
132 else:
130 else:
133 def f(*args):
131 def f(*args):
134 if args not in cache:
132 if args not in cache:
135 cache[args] = func(*args)
133 cache[args] = func(*args)
136 return cache[args]
134 return cache[args]
137
135
138 return f
136 return f
139
137
140 class propertycache(object):
138 class propertycache(object):
141 def __init__(self, func):
139 def __init__(self, func):
142 self.func = func
140 self.func = func
143 self.name = func.__name__
141 self.name = func.__name__
144 def __get__(self, obj, type=None):
142 def __get__(self, obj, type=None):
145 result = self.func(obj)
143 result = self.func(obj)
146 setattr(obj, self.name, result)
144 setattr(obj, self.name, result)
147 return result
145 return result
148
146
149 def pipefilter(s, cmd):
147 def pipefilter(s, cmd):
150 '''filter string S through command CMD, returning its output'''
148 '''filter string S through command CMD, returning its output'''
151 (pin, pout) = popen2(cmd, 'b')
149 (pin, pout) = popen2(cmd, 'b')
152 def writer():
150 def writer():
153 try:
151 try:
154 pin.write(s)
152 pin.write(s)
155 pin.close()
153 pin.close()
156 except IOError, inst:
154 except IOError, inst:
157 if inst.errno != errno.EPIPE:
155 if inst.errno != errno.EPIPE:
158 raise
156 raise
159
157
160 # we should use select instead on UNIX, but this will work on most
158 # we should use select instead on UNIX, but this will work on most
161 # systems, including Windows
159 # systems, including Windows
162 w = threading.Thread(target=writer)
160 w = threading.Thread(target=writer)
163 w.start()
161 w.start()
164 f = pout.read()
162 f = pout.read()
165 pout.close()
163 pout.close()
166 w.join()
164 w.join()
167 return f
165 return f
168
166
169 def tempfilter(s, cmd):
167 def tempfilter(s, cmd):
170 '''filter string S through a pair of temporary files with CMD.
168 '''filter string S through a pair of temporary files with CMD.
171 CMD is used as a template to create the real command to be run,
169 CMD is used as a template to create the real command to be run,
172 with the strings INFILE and OUTFILE replaced by the real names of
170 with the strings INFILE and OUTFILE replaced by the real names of
173 the temporary files generated.'''
171 the temporary files generated.'''
174 inname, outname = None, None
172 inname, outname = None, None
175 try:
173 try:
176 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
174 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
177 fp = os.fdopen(infd, 'wb')
175 fp = os.fdopen(infd, 'wb')
178 fp.write(s)
176 fp.write(s)
179 fp.close()
177 fp.close()
180 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
178 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
181 os.close(outfd)
179 os.close(outfd)
182 cmd = cmd.replace('INFILE', inname)
180 cmd = cmd.replace('INFILE', inname)
183 cmd = cmd.replace('OUTFILE', outname)
181 cmd = cmd.replace('OUTFILE', outname)
184 code = os.system(cmd)
182 code = os.system(cmd)
185 if sys.platform == 'OpenVMS' and code & 1:
183 if sys.platform == 'OpenVMS' and code & 1:
186 code = 0
184 code = 0
187 if code: raise Abort(_("command '%s' failed: %s") %
185 if code: raise Abort(_("command '%s' failed: %s") %
188 (cmd, explain_exit(code)))
186 (cmd, explain_exit(code)))
189 return open(outname, 'rb').read()
187 return open(outname, 'rb').read()
190 finally:
188 finally:
191 try:
189 try:
192 if inname: os.unlink(inname)
190 if inname: os.unlink(inname)
193 except: pass
191 except: pass
194 try:
192 try:
195 if outname: os.unlink(outname)
193 if outname: os.unlink(outname)
196 except: pass
194 except: pass
197
195
198 filtertable = {
196 filtertable = {
199 'tempfile:': tempfilter,
197 'tempfile:': tempfilter,
200 'pipe:': pipefilter,
198 'pipe:': pipefilter,
201 }
199 }
202
200
203 def filter(s, cmd):
201 def filter(s, cmd):
204 "filter a string through a command that transforms its input to its output"
202 "filter a string through a command that transforms its input to its output"
205 for name, fn in filtertable.iteritems():
203 for name, fn in filtertable.iteritems():
206 if cmd.startswith(name):
204 if cmd.startswith(name):
207 return fn(s, cmd[len(name):].lstrip())
205 return fn(s, cmd[len(name):].lstrip())
208 return pipefilter(s, cmd)
206 return pipefilter(s, cmd)
209
207
210 def binary(s):
208 def binary(s):
211 """return true if a string is binary data"""
209 """return true if a string is binary data"""
212 return bool(s and '\0' in s)
210 return bool(s and '\0' in s)
213
211
214 def increasingchunks(source, min=1024, max=65536):
212 def increasingchunks(source, min=1024, max=65536):
215 '''return no less than min bytes per chunk while data remains,
213 '''return no less than min bytes per chunk while data remains,
216 doubling min after each chunk until it reaches max'''
214 doubling min after each chunk until it reaches max'''
217 def log2(x):
215 def log2(x):
218 if not x:
216 if not x:
219 return 0
217 return 0
220 i = 0
218 i = 0
221 while x:
219 while x:
222 x >>= 1
220 x >>= 1
223 i += 1
221 i += 1
224 return i - 1
222 return i - 1
225
223
226 buf = []
224 buf = []
227 blen = 0
225 blen = 0
228 for chunk in source:
226 for chunk in source:
229 buf.append(chunk)
227 buf.append(chunk)
230 blen += len(chunk)
228 blen += len(chunk)
231 if blen >= min:
229 if blen >= min:
232 if min < max:
230 if min < max:
233 min = min << 1
231 min = min << 1
234 nmin = 1 << log2(blen)
232 nmin = 1 << log2(blen)
235 if nmin > min:
233 if nmin > min:
236 min = nmin
234 min = nmin
237 if min > max:
235 if min > max:
238 min = max
236 min = max
239 yield ''.join(buf)
237 yield ''.join(buf)
240 blen = 0
238 blen = 0
241 buf = []
239 buf = []
242 if buf:
240 if buf:
243 yield ''.join(buf)
241 yield ''.join(buf)
244
242
245 Abort = error.Abort
243 Abort = error.Abort
246
244
247 def always(fn): return True
245 def always(fn): return True
248 def never(fn): return False
246 def never(fn): return False
249
247
250 def patkind(name, default):
248 def patkind(name, default):
251 """Split a string into an optional pattern kind prefix and the
249 """Split a string into an optional pattern kind prefix and the
252 actual pattern."""
250 actual pattern."""
253 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
251 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
254 if name.startswith(prefix + ':'): return name.split(':', 1)
252 if name.startswith(prefix + ':'): return name.split(':', 1)
255 return default, name
253 return default, name
256
254
257 def globre(pat, head='^', tail='$'):
255 def globre(pat, head='^', tail='$'):
258 "convert a glob pattern into a regexp"
256 "convert a glob pattern into a regexp"
259 i, n = 0, len(pat)
257 i, n = 0, len(pat)
260 res = ''
258 res = ''
261 group = 0
259 group = 0
262 def peek(): return i < n and pat[i]
260 def peek(): return i < n and pat[i]
263 while i < n:
261 while i < n:
264 c = pat[i]
262 c = pat[i]
265 i = i+1
263 i = i+1
266 if c == '*':
264 if c == '*':
267 if peek() == '*':
265 if peek() == '*':
268 i += 1
266 i += 1
269 res += '.*'
267 res += '.*'
270 else:
268 else:
271 res += '[^/]*'
269 res += '[^/]*'
272 elif c == '?':
270 elif c == '?':
273 res += '.'
271 res += '.'
274 elif c == '[':
272 elif c == '[':
275 j = i
273 j = i
276 if j < n and pat[j] in '!]':
274 if j < n and pat[j] in '!]':
277 j += 1
275 j += 1
278 while j < n and pat[j] != ']':
276 while j < n and pat[j] != ']':
279 j += 1
277 j += 1
280 if j >= n:
278 if j >= n:
281 res += '\\['
279 res += '\\['
282 else:
280 else:
283 stuff = pat[i:j].replace('\\','\\\\')
281 stuff = pat[i:j].replace('\\','\\\\')
284 i = j + 1
282 i = j + 1
285 if stuff[0] == '!':
283 if stuff[0] == '!':
286 stuff = '^' + stuff[1:]
284 stuff = '^' + stuff[1:]
287 elif stuff[0] == '^':
285 elif stuff[0] == '^':
288 stuff = '\\' + stuff
286 stuff = '\\' + stuff
289 res = '%s[%s]' % (res, stuff)
287 res = '%s[%s]' % (res, stuff)
290 elif c == '{':
288 elif c == '{':
291 group += 1
289 group += 1
292 res += '(?:'
290 res += '(?:'
293 elif c == '}' and group:
291 elif c == '}' and group:
294 res += ')'
292 res += ')'
295 group -= 1
293 group -= 1
296 elif c == ',' and group:
294 elif c == ',' and group:
297 res += '|'
295 res += '|'
298 elif c == '\\':
296 elif c == '\\':
299 p = peek()
297 p = peek()
300 if p:
298 if p:
301 i += 1
299 i += 1
302 res += re.escape(p)
300 res += re.escape(p)
303 else:
301 else:
304 res += re.escape(c)
302 res += re.escape(c)
305 else:
303 else:
306 res += re.escape(c)
304 res += re.escape(c)
307 return head + res + tail
305 return head + res + tail
308
306
309 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
307 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
310
308
311 def pathto(root, n1, n2):
309 def pathto(root, n1, n2):
312 '''return the relative path from one place to another.
310 '''return the relative path from one place to another.
313 root should use os.sep to separate directories
311 root should use os.sep to separate directories
314 n1 should use os.sep to separate directories
312 n1 should use os.sep to separate directories
315 n2 should use "/" to separate directories
313 n2 should use "/" to separate directories
316 returns an os.sep-separated path.
314 returns an os.sep-separated path.
317
315
318 If n1 is a relative path, it's assumed it's
316 If n1 is a relative path, it's assumed it's
319 relative to root.
317 relative to root.
320 n2 should always be relative to root.
318 n2 should always be relative to root.
321 '''
319 '''
322 if not n1: return localpath(n2)
320 if not n1: return localpath(n2)
323 if os.path.isabs(n1):
321 if os.path.isabs(n1):
324 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
322 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
325 return os.path.join(root, localpath(n2))
323 return os.path.join(root, localpath(n2))
326 n2 = '/'.join((pconvert(root), n2))
324 n2 = '/'.join((pconvert(root), n2))
327 a, b = splitpath(n1), n2.split('/')
325 a, b = splitpath(n1), n2.split('/')
328 a.reverse()
326 a.reverse()
329 b.reverse()
327 b.reverse()
330 while a and b and a[-1] == b[-1]:
328 while a and b and a[-1] == b[-1]:
331 a.pop()
329 a.pop()
332 b.pop()
330 b.pop()
333 b.reverse()
331 b.reverse()
334 return os.sep.join((['..'] * len(a)) + b) or '.'
332 return os.sep.join((['..'] * len(a)) + b) or '.'
335
333
336 def canonpath(root, cwd, myname):
334 def canonpath(root, cwd, myname):
337 """return the canonical path of myname, given cwd and root"""
335 """return the canonical path of myname, given cwd and root"""
338 if root == os.sep:
336 if root == os.sep:
339 rootsep = os.sep
337 rootsep = os.sep
340 elif endswithsep(root):
338 elif endswithsep(root):
341 rootsep = root
339 rootsep = root
342 else:
340 else:
343 rootsep = root + os.sep
341 rootsep = root + os.sep
344 name = myname
342 name = myname
345 if not os.path.isabs(name):
343 if not os.path.isabs(name):
346 name = os.path.join(root, cwd, name)
344 name = os.path.join(root, cwd, name)
347 name = os.path.normpath(name)
345 name = os.path.normpath(name)
348 audit_path = path_auditor(root)
346 audit_path = path_auditor(root)
349 if name != rootsep and name.startswith(rootsep):
347 if name != rootsep and name.startswith(rootsep):
350 name = name[len(rootsep):]
348 name = name[len(rootsep):]
351 audit_path(name)
349 audit_path(name)
352 return pconvert(name)
350 return pconvert(name)
353 elif name == root:
351 elif name == root:
354 return ''
352 return ''
355 else:
353 else:
356 # Determine whether `name' is in the hierarchy at or beneath `root',
354 # Determine whether `name' is in the hierarchy at or beneath `root',
357 # by iterating name=dirname(name) until that causes no change (can't
355 # by iterating name=dirname(name) until that causes no change (can't
358 # check name == '/', because that doesn't work on windows). For each
356 # check name == '/', because that doesn't work on windows). For each
359 # `name', compare dev/inode numbers. If they match, the list `rel'
357 # `name', compare dev/inode numbers. If they match, the list `rel'
360 # holds the reversed list of components making up the relative file
358 # holds the reversed list of components making up the relative file
361 # name we want.
359 # name we want.
362 root_st = os.stat(root)
360 root_st = os.stat(root)
363 rel = []
361 rel = []
364 while True:
362 while True:
365 try:
363 try:
366 name_st = os.stat(name)
364 name_st = os.stat(name)
367 except OSError:
365 except OSError:
368 break
366 break
369 if samestat(name_st, root_st):
367 if samestat(name_st, root_st):
370 if not rel:
368 if not rel:
371 # name was actually the same as root (maybe a symlink)
369 # name was actually the same as root (maybe a symlink)
372 return ''
370 return ''
373 rel.reverse()
371 rel.reverse()
374 name = os.path.join(*rel)
372 name = os.path.join(*rel)
375 audit_path(name)
373 audit_path(name)
376 return pconvert(name)
374 return pconvert(name)
377 dirname, basename = os.path.split(name)
375 dirname, basename = os.path.split(name)
378 rel.append(basename)
376 rel.append(basename)
379 if dirname == name:
377 if dirname == name:
380 break
378 break
381 name = dirname
379 name = dirname
382
380
383 raise Abort('%s not under root' % myname)
381 raise Abort('%s not under root' % myname)
384
382
385 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
383 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
386 """build a function to match a set of file patterns
384 """build a function to match a set of file patterns
387
385
388 arguments:
386 arguments:
389 canonroot - the canonical root of the tree you're matching against
387 canonroot - the canonical root of the tree you're matching against
390 cwd - the current working directory, if relevant
388 cwd - the current working directory, if relevant
391 names - patterns to find
389 names - patterns to find
392 inc - patterns to include
390 inc - patterns to include
393 exc - patterns to exclude
391 exc - patterns to exclude
394 dflt_pat - if a pattern in names has no explicit type, assume this one
392 dflt_pat - if a pattern in names has no explicit type, assume this one
395 src - where these patterns came from (e.g. .hgignore)
393 src - where these patterns came from (e.g. .hgignore)
396
394
397 a pattern is one of:
395 a pattern is one of:
398 'glob:<glob>' - a glob relative to cwd
396 'glob:<glob>' - a glob relative to cwd
399 're:<regexp>' - a regular expression
397 're:<regexp>' - a regular expression
400 'path:<path>' - a path relative to canonroot
398 'path:<path>' - a path relative to canonroot
401 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
399 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
402 'relpath:<path>' - a path relative to cwd
400 'relpath:<path>' - a path relative to cwd
403 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
401 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
404 '<something>' - one of the cases above, selected by the dflt_pat argument
402 '<something>' - one of the cases above, selected by the dflt_pat argument
405
403
406 returns:
404 returns:
407 a 3-tuple containing
405 a 3-tuple containing
408 - list of roots (places where one should start a recursive walk of the fs);
406 - list of roots (places where one should start a recursive walk of the fs);
409 this often matches the explicit non-pattern names passed in, but also
407 this often matches the explicit non-pattern names passed in, but also
410 includes the initial part of glob: patterns that has no glob characters
408 includes the initial part of glob: patterns that has no glob characters
411 - a bool match(filename) function
409 - a bool match(filename) function
412 - a bool indicating if any patterns were passed in
410 - a bool indicating if any patterns were passed in
413 """
411 """
414
412
415 # a common case: no patterns at all
413 # a common case: no patterns at all
416 if not names and not inc and not exc:
414 if not names and not inc and not exc:
417 return [], always, False
415 return [], always, False
418
416
419 def contains_glob(name):
417 def contains_glob(name):
420 for c in name:
418 for c in name:
421 if c in _globchars: return True
419 if c in _globchars: return True
422 return False
420 return False
423
421
424 def regex(kind, name, tail):
422 def regex(kind, name, tail):
425 '''convert a pattern into a regular expression'''
423 '''convert a pattern into a regular expression'''
426 if not name:
424 if not name:
427 return ''
425 return ''
428 if kind == 're':
426 if kind == 're':
429 return name
427 return name
430 elif kind == 'path':
428 elif kind == 'path':
431 return '^' + re.escape(name) + '(?:/|$)'
429 return '^' + re.escape(name) + '(?:/|$)'
432 elif kind == 'relglob':
430 elif kind == 'relglob':
433 return globre(name, '(?:|.*/)', tail)
431 return globre(name, '(?:|.*/)', tail)
434 elif kind == 'relpath':
432 elif kind == 'relpath':
435 return re.escape(name) + '(?:/|$)'
433 return re.escape(name) + '(?:/|$)'
436 elif kind == 'relre':
434 elif kind == 'relre':
437 if name.startswith('^'):
435 if name.startswith('^'):
438 return name
436 return name
439 return '.*' + name
437 return '.*' + name
440 return globre(name, '', tail)
438 return globre(name, '', tail)
441
439
442 def matchfn(pats, tail):
440 def matchfn(pats, tail):
443 """build a matching function from a set of patterns"""
441 """build a matching function from a set of patterns"""
444 if not pats:
442 if not pats:
445 return
443 return
446 try:
444 try:
447 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
445 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
448 if len(pat) > 20000:
446 if len(pat) > 20000:
449 raise OverflowError()
447 raise OverflowError()
450 return re.compile(pat).match
448 return re.compile(pat).match
451 except OverflowError:
449 except OverflowError:
452 # We're using a Python with a tiny regex engine and we
450 # We're using a Python with a tiny regex engine and we
453 # made it explode, so we'll divide the pattern list in two
451 # made it explode, so we'll divide the pattern list in two
454 # until it works
452 # until it works
455 l = len(pats)
453 l = len(pats)
456 if l < 2:
454 if l < 2:
457 raise
455 raise
458 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
456 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
459 return lambda s: a(s) or b(s)
457 return lambda s: a(s) or b(s)
460 except re.error:
458 except re.error:
461 for k, p in pats:
459 for k, p in pats:
462 try:
460 try:
463 re.compile('(?:%s)' % regex(k, p, tail))
461 re.compile('(?:%s)' % regex(k, p, tail))
464 except re.error:
462 except re.error:
465 if src:
463 if src:
466 raise Abort("%s: invalid pattern (%s): %s" %
464 raise Abort("%s: invalid pattern (%s): %s" %
467 (src, k, p))
465 (src, k, p))
468 else:
466 else:
469 raise Abort("invalid pattern (%s): %s" % (k, p))
467 raise Abort("invalid pattern (%s): %s" % (k, p))
470 raise Abort("invalid pattern")
468 raise Abort("invalid pattern")
471
469
472 def globprefix(pat):
470 def globprefix(pat):
473 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
471 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
474 root = []
472 root = []
475 for p in pat.split('/'):
473 for p in pat.split('/'):
476 if contains_glob(p): break
474 if contains_glob(p): break
477 root.append(p)
475 root.append(p)
478 return '/'.join(root) or '.'
476 return '/'.join(root) or '.'
479
477
480 def normalizepats(names, default):
478 def normalizepats(names, default):
481 pats = []
479 pats = []
482 roots = []
480 roots = []
483 anypats = False
481 anypats = False
484 for kind, name in [patkind(p, default) for p in names]:
482 for kind, name in [patkind(p, default) for p in names]:
485 if kind in ('glob', 'relpath'):
483 if kind in ('glob', 'relpath'):
486 name = canonpath(canonroot, cwd, name)
484 name = canonpath(canonroot, cwd, name)
487 elif kind in ('relglob', 'path'):
485 elif kind in ('relglob', 'path'):
488 name = normpath(name)
486 name = normpath(name)
489
487
490 pats.append((kind, name))
488 pats.append((kind, name))
491
489
492 if kind in ('glob', 're', 'relglob', 'relre'):
490 if kind in ('glob', 're', 'relglob', 'relre'):
493 anypats = True
491 anypats = True
494
492
495 if kind == 'glob':
493 if kind == 'glob':
496 root = globprefix(name)
494 root = globprefix(name)
497 roots.append(root)
495 roots.append(root)
498 elif kind in ('relpath', 'path'):
496 elif kind in ('relpath', 'path'):
499 roots.append(name or '.')
497 roots.append(name or '.')
500 elif kind == 'relglob':
498 elif kind == 'relglob':
501 roots.append('.')
499 roots.append('.')
502 return roots, pats, anypats
500 return roots, pats, anypats
503
501
504 roots, pats, anypats = normalizepats(names, dflt_pat)
502 roots, pats, anypats = normalizepats(names, dflt_pat)
505
503
506 patmatch = matchfn(pats, '$') or always
504 patmatch = matchfn(pats, '$') or always
507 incmatch = always
505 incmatch = always
508 if inc:
506 if inc:
509 dummy, inckinds, dummy = normalizepats(inc, 'glob')
507 dummy, inckinds, dummy = normalizepats(inc, 'glob')
510 incmatch = matchfn(inckinds, '(?:/|$)')
508 incmatch = matchfn(inckinds, '(?:/|$)')
511 excmatch = never
509 excmatch = never
512 if exc:
510 if exc:
513 dummy, exckinds, dummy = normalizepats(exc, 'glob')
511 dummy, exckinds, dummy = normalizepats(exc, 'glob')
514 excmatch = matchfn(exckinds, '(?:/|$)')
512 excmatch = matchfn(exckinds, '(?:/|$)')
515
513
516 if not names and inc and not exc:
514 if not names and inc and not exc:
517 # common case: hgignore patterns
515 # common case: hgignore patterns
518 match = incmatch
516 match = incmatch
519 else:
517 else:
520 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
518 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
521
519
522 return (roots, match, (inc or exc or anypats) and True)
520 return (roots, match, (inc or exc or anypats) and True)
523
521
524 _hgexecutable = None
522 _hgexecutable = None
525
523
526 def main_is_frozen():
524 def main_is_frozen():
527 """return True if we are a frozen executable.
525 """return True if we are a frozen executable.
528
526
529 The code supports py2exe (most common, Windows only) and tools/freeze
527 The code supports py2exe (most common, Windows only) and tools/freeze
530 (portable, not much used).
528 (portable, not much used).
531 """
529 """
532 return (hasattr(sys, "frozen") or # new py2exe
530 return (hasattr(sys, "frozen") or # new py2exe
533 hasattr(sys, "importers") or # old py2exe
531 hasattr(sys, "importers") or # old py2exe
534 imp.is_frozen("__main__")) # tools/freeze
532 imp.is_frozen("__main__")) # tools/freeze
535
533
536 def hgexecutable():
534 def hgexecutable():
537 """return location of the 'hg' executable.
535 """return location of the 'hg' executable.
538
536
539 Defaults to $HG or 'hg' in the search path.
537 Defaults to $HG or 'hg' in the search path.
540 """
538 """
541 if _hgexecutable is None:
539 if _hgexecutable is None:
542 hg = os.environ.get('HG')
540 hg = os.environ.get('HG')
543 if hg:
541 if hg:
544 set_hgexecutable(hg)
542 set_hgexecutable(hg)
545 elif main_is_frozen():
543 elif main_is_frozen():
546 set_hgexecutable(sys.executable)
544 set_hgexecutable(sys.executable)
547 else:
545 else:
548 set_hgexecutable(find_exe('hg') or 'hg')
546 set_hgexecutable(find_exe('hg') or 'hg')
549 return _hgexecutable
547 return _hgexecutable
550
548
551 def set_hgexecutable(path):
549 def set_hgexecutable(path):
552 """set location of the 'hg' executable"""
550 """set location of the 'hg' executable"""
553 global _hgexecutable
551 global _hgexecutable
554 _hgexecutable = path
552 _hgexecutable = path
555
553
556 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
554 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
557 '''enhanced shell command execution.
555 '''enhanced shell command execution.
558 run with environment maybe modified, maybe in different dir.
556 run with environment maybe modified, maybe in different dir.
559
557
560 if command fails and onerr is None, return status. if ui object,
558 if command fails and onerr is None, return status. if ui object,
561 print error message and return status, else raise onerr object as
559 print error message and return status, else raise onerr object as
562 exception.'''
560 exception.'''
563 def py2shell(val):
561 def py2shell(val):
564 'convert python object into string that is useful to shell'
562 'convert python object into string that is useful to shell'
565 if val in (None, False):
563 if val in (None, False):
566 return '0'
564 return '0'
567 if val == True:
565 if val == True:
568 return '1'
566 return '1'
569 return str(val)
567 return str(val)
570 oldenv = {}
568 oldenv = {}
571 for k in environ:
569 for k in environ:
572 oldenv[k] = os.environ.get(k)
570 oldenv[k] = os.environ.get(k)
573 if cwd is not None:
571 if cwd is not None:
574 oldcwd = os.getcwd()
572 oldcwd = os.getcwd()
575 origcmd = cmd
573 origcmd = cmd
576 if os.name == 'nt':
574 if os.name == 'nt':
577 cmd = '"%s"' % cmd
575 cmd = '"%s"' % cmd
578 try:
576 try:
579 for k, v in environ.iteritems():
577 for k, v in environ.iteritems():
580 os.environ[k] = py2shell(v)
578 os.environ[k] = py2shell(v)
581 os.environ['HG'] = hgexecutable()
579 os.environ['HG'] = hgexecutable()
582 if cwd is not None and oldcwd != cwd:
580 if cwd is not None and oldcwd != cwd:
583 os.chdir(cwd)
581 os.chdir(cwd)
584 rc = os.system(cmd)
582 rc = os.system(cmd)
585 if sys.platform == 'OpenVMS' and rc & 1:
583 if sys.platform == 'OpenVMS' and rc & 1:
586 rc = 0
584 rc = 0
587 if rc and onerr:
585 if rc and onerr:
588 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
586 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
589 explain_exit(rc)[0])
587 explain_exit(rc)[0])
590 if errprefix:
588 if errprefix:
591 errmsg = '%s: %s' % (errprefix, errmsg)
589 errmsg = '%s: %s' % (errprefix, errmsg)
592 try:
590 try:
593 onerr.warn(errmsg + '\n')
591 onerr.warn(errmsg + '\n')
594 except AttributeError:
592 except AttributeError:
595 raise onerr(errmsg)
593 raise onerr(errmsg)
596 return rc
594 return rc
597 finally:
595 finally:
598 for k, v in oldenv.iteritems():
596 for k, v in oldenv.iteritems():
599 if v is None:
597 if v is None:
600 del os.environ[k]
598 del os.environ[k]
601 else:
599 else:
602 os.environ[k] = v
600 os.environ[k] = v
603 if cwd is not None and oldcwd != cwd:
601 if cwd is not None and oldcwd != cwd:
604 os.chdir(oldcwd)
602 os.chdir(oldcwd)
605
603
606 def checksignature(func):
604 def checksignature(func):
607 '''wrap a function with code to check for calling errors'''
605 '''wrap a function with code to check for calling errors'''
608 def check(*args, **kwargs):
606 def check(*args, **kwargs):
609 try:
607 try:
610 return func(*args, **kwargs)
608 return func(*args, **kwargs)
611 except TypeError:
609 except TypeError:
612 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
610 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
613 raise error.SignatureError
611 raise error.SignatureError
614 raise
612 raise
615
613
616 return check
614 return check
617
615
618 # os.path.lexists is not available on python2.3
616 # os.path.lexists is not available on python2.3
619 def lexists(filename):
617 def lexists(filename):
620 "test whether a file with this name exists. does not follow symlinks"
618 "test whether a file with this name exists. does not follow symlinks"
621 try:
619 try:
622 os.lstat(filename)
620 os.lstat(filename)
623 except:
621 except:
624 return False
622 return False
625 return True
623 return True
626
624
627 def rename(src, dst):
625 def rename(src, dst):
628 """forcibly rename a file"""
626 """forcibly rename a file"""
629 try:
627 try:
630 os.rename(src, dst)
628 os.rename(src, dst)
631 except OSError, err: # FIXME: check err (EEXIST ?)
629 except OSError, err: # FIXME: check err (EEXIST ?)
632 # on windows, rename to existing file is not allowed, so we
630 # on windows, rename to existing file is not allowed, so we
633 # must delete destination first. but if file is open, unlink
631 # must delete destination first. but if file is open, unlink
634 # schedules it for delete but does not delete it. rename
632 # schedules it for delete but does not delete it. rename
635 # happens immediately even for open files, so we rename
633 # happens immediately even for open files, so we rename
636 # destination to a temporary name, then delete that. then
634 # destination to a temporary name, then delete that. then
637 # rename is safe to do.
635 # rename is safe to do.
638 temp = dst + "-force-rename"
636 temp = dst + "-force-rename"
639 os.rename(dst, temp)
637 os.rename(dst, temp)
640 os.unlink(temp)
638 os.unlink(temp)
641 os.rename(src, dst)
639 os.rename(src, dst)
642
640
643 def unlink(f):
641 def unlink(f):
644 """unlink and remove the directory if it is empty"""
642 """unlink and remove the directory if it is empty"""
645 os.unlink(f)
643 os.unlink(f)
646 # try removing directories that might now be empty
644 # try removing directories that might now be empty
647 try:
645 try:
648 os.removedirs(os.path.dirname(f))
646 os.removedirs(os.path.dirname(f))
649 except OSError:
647 except OSError:
650 pass
648 pass
651
649
652 def copyfile(src, dest):
650 def copyfile(src, dest):
653 "copy a file, preserving mode and atime/mtime"
651 "copy a file, preserving mode and atime/mtime"
654 if os.path.islink(src):
652 if os.path.islink(src):
655 try:
653 try:
656 os.unlink(dest)
654 os.unlink(dest)
657 except:
655 except:
658 pass
656 pass
659 os.symlink(os.readlink(src), dest)
657 os.symlink(os.readlink(src), dest)
660 else:
658 else:
661 try:
659 try:
662 shutil.copyfile(src, dest)
660 shutil.copyfile(src, dest)
663 shutil.copystat(src, dest)
661 shutil.copystat(src, dest)
664 except shutil.Error, inst:
662 except shutil.Error, inst:
665 raise Abort(str(inst))
663 raise Abort(str(inst))
666
664
667 def copyfiles(src, dst, hardlink=None):
665 def copyfiles(src, dst, hardlink=None):
668 """Copy a directory tree using hardlinks if possible"""
666 """Copy a directory tree using hardlinks if possible"""
669
667
670 if hardlink is None:
668 if hardlink is None:
671 hardlink = (os.stat(src).st_dev ==
669 hardlink = (os.stat(src).st_dev ==
672 os.stat(os.path.dirname(dst)).st_dev)
670 os.stat(os.path.dirname(dst)).st_dev)
673
671
674 if os.path.isdir(src):
672 if os.path.isdir(src):
675 os.mkdir(dst)
673 os.mkdir(dst)
676 for name, kind in osutil.listdir(src):
674 for name, kind in osutil.listdir(src):
677 srcname = os.path.join(src, name)
675 srcname = os.path.join(src, name)
678 dstname = os.path.join(dst, name)
676 dstname = os.path.join(dst, name)
679 copyfiles(srcname, dstname, hardlink)
677 copyfiles(srcname, dstname, hardlink)
680 else:
678 else:
681 if hardlink:
679 if hardlink:
682 try:
680 try:
683 os_link(src, dst)
681 os_link(src, dst)
684 except (IOError, OSError):
682 except (IOError, OSError):
685 hardlink = False
683 hardlink = False
686 shutil.copy(src, dst)
684 shutil.copy(src, dst)
687 else:
685 else:
688 shutil.copy(src, dst)
686 shutil.copy(src, dst)
689
687
690 class path_auditor(object):
688 class path_auditor(object):
691 '''ensure that a filesystem path contains no banned components.
689 '''ensure that a filesystem path contains no banned components.
692 the following properties of a path are checked:
690 the following properties of a path are checked:
693
691
694 - under top-level .hg
692 - under top-level .hg
695 - starts at the root of a windows drive
693 - starts at the root of a windows drive
696 - contains ".."
694 - contains ".."
697 - traverses a symlink (e.g. a/symlink_here/b)
695 - traverses a symlink (e.g. a/symlink_here/b)
698 - inside a nested repository'''
696 - inside a nested repository'''
699
697
700 def __init__(self, root):
698 def __init__(self, root):
701 self.audited = set()
699 self.audited = set()
702 self.auditeddir = set()
700 self.auditeddir = set()
703 self.root = root
701 self.root = root
704
702
705 def __call__(self, path):
703 def __call__(self, path):
706 if path in self.audited:
704 if path in self.audited:
707 return
705 return
708 normpath = os.path.normcase(path)
706 normpath = os.path.normcase(path)
709 parts = splitpath(normpath)
707 parts = splitpath(normpath)
710 if (os.path.splitdrive(path)[0]
708 if (os.path.splitdrive(path)[0]
711 or parts[0].lower() in ('.hg', '.hg.', '')
709 or parts[0].lower() in ('.hg', '.hg.', '')
712 or os.pardir in parts):
710 or os.pardir in parts):
713 raise Abort(_("path contains illegal component: %s") % path)
711 raise Abort(_("path contains illegal component: %s") % path)
714 if '.hg' in path.lower():
712 if '.hg' in path.lower():
715 lparts = [p.lower() for p in parts]
713 lparts = [p.lower() for p in parts]
716 for p in '.hg', '.hg.':
714 for p in '.hg', '.hg.':
717 if p in lparts[1:]:
715 if p in lparts[1:]:
718 pos = lparts.index(p)
716 pos = lparts.index(p)
719 base = os.path.join(*parts[:pos])
717 base = os.path.join(*parts[:pos])
720 raise Abort(_('path %r is inside repo %r') % (path, base))
718 raise Abort(_('path %r is inside repo %r') % (path, base))
721 def check(prefix):
719 def check(prefix):
722 curpath = os.path.join(self.root, prefix)
720 curpath = os.path.join(self.root, prefix)
723 try:
721 try:
724 st = os.lstat(curpath)
722 st = os.lstat(curpath)
725 except OSError, err:
723 except OSError, err:
726 # EINVAL can be raised as invalid path syntax under win32.
724 # EINVAL can be raised as invalid path syntax under win32.
727 # They must be ignored for patterns can be checked too.
725 # They must be ignored for patterns can be checked too.
728 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
726 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
729 raise
727 raise
730 else:
728 else:
731 if stat.S_ISLNK(st.st_mode):
729 if stat.S_ISLNK(st.st_mode):
732 raise Abort(_('path %r traverses symbolic link %r') %
730 raise Abort(_('path %r traverses symbolic link %r') %
733 (path, prefix))
731 (path, prefix))
734 elif (stat.S_ISDIR(st.st_mode) and
732 elif (stat.S_ISDIR(st.st_mode) and
735 os.path.isdir(os.path.join(curpath, '.hg'))):
733 os.path.isdir(os.path.join(curpath, '.hg'))):
736 raise Abort(_('path %r is inside repo %r') %
734 raise Abort(_('path %r is inside repo %r') %
737 (path, prefix))
735 (path, prefix))
738 parts.pop()
736 parts.pop()
739 prefixes = []
737 prefixes = []
740 for n in range(len(parts)):
738 for n in range(len(parts)):
741 prefix = os.sep.join(parts)
739 prefix = os.sep.join(parts)
742 if prefix in self.auditeddir:
740 if prefix in self.auditeddir:
743 break
741 break
744 check(prefix)
742 check(prefix)
745 prefixes.append(prefix)
743 prefixes.append(prefix)
746 parts.pop()
744 parts.pop()
747
745
748 self.audited.add(path)
746 self.audited.add(path)
749 # only add prefixes to the cache after checking everything: we don't
747 # only add prefixes to the cache after checking everything: we don't
750 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
748 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
751 self.auditeddir.update(prefixes)
749 self.auditeddir.update(prefixes)
752
750
753 def nlinks(pathname):
751 def nlinks(pathname):
754 """Return number of hardlinks for the given file."""
752 """Return number of hardlinks for the given file."""
755 return os.lstat(pathname).st_nlink
753 return os.lstat(pathname).st_nlink
756
754
757 if hasattr(os, 'link'):
755 if hasattr(os, 'link'):
758 os_link = os.link
756 os_link = os.link
759 else:
757 else:
760 def os_link(src, dst):
758 def os_link(src, dst):
761 raise OSError(0, _("Hardlinks not supported"))
759 raise OSError(0, _("Hardlinks not supported"))
762
760
763 def lookup_reg(key, name=None, scope=None):
761 def lookup_reg(key, name=None, scope=None):
764 return None
762 return None
765
763
766 if os.name == 'nt':
764 if os.name == 'nt':
767 from windows import *
765 from windows import *
768 def expand_glob(pats):
766 def expand_glob(pats):
769 '''On Windows, expand the implicit globs in a list of patterns'''
767 '''On Windows, expand the implicit globs in a list of patterns'''
770 ret = []
768 ret = []
771 for p in pats:
769 for p in pats:
772 kind, name = patkind(p, None)
770 kind, name = patkind(p, None)
773 if kind is None:
771 if kind is None:
774 globbed = glob.glob(name)
772 globbed = glob.glob(name)
775 if globbed:
773 if globbed:
776 ret.extend(globbed)
774 ret.extend(globbed)
777 continue
775 continue
778 # if we couldn't expand the glob, just keep it around
776 # if we couldn't expand the glob, just keep it around
779 ret.append(p)
777 ret.append(p)
780 return ret
778 return ret
781 else:
779 else:
782 from posix import *
780 from posix import *
783
781
784 def makelock(info, pathname):
782 def makelock(info, pathname):
785 try:
783 try:
786 return os.symlink(info, pathname)
784 return os.symlink(info, pathname)
787 except OSError, why:
785 except OSError, why:
788 if why.errno == errno.EEXIST:
786 if why.errno == errno.EEXIST:
789 raise
787 raise
790 except AttributeError: # no symlink in os
788 except AttributeError: # no symlink in os
791 pass
789 pass
792
790
793 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
791 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
794 os.write(ld, info)
792 os.write(ld, info)
795 os.close(ld)
793 os.close(ld)
796
794
797 def readlock(pathname):
795 def readlock(pathname):
798 try:
796 try:
799 return os.readlink(pathname)
797 return os.readlink(pathname)
800 except OSError, why:
798 except OSError, why:
801 if why.errno not in (errno.EINVAL, errno.ENOSYS):
799 if why.errno not in (errno.EINVAL, errno.ENOSYS):
802 raise
800 raise
803 except AttributeError: # no symlink in os
801 except AttributeError: # no symlink in os
804 pass
802 pass
805 return posixfile(pathname).read()
803 return posixfile(pathname).read()
806
804
807 def fstat(fp):
805 def fstat(fp):
808 '''stat file object that may not have fileno method.'''
806 '''stat file object that may not have fileno method.'''
809 try:
807 try:
810 return os.fstat(fp.fileno())
808 return os.fstat(fp.fileno())
811 except AttributeError:
809 except AttributeError:
812 return os.stat(fp.name)
810 return os.stat(fp.name)
813
811
814 # File system features
812 # File system features
815
813
816 def checkcase(path):
814 def checkcase(path):
817 """
815 """
818 Check whether the given path is on a case-sensitive filesystem
816 Check whether the given path is on a case-sensitive filesystem
819
817
820 Requires a path (like /foo/.hg) ending with a foldable final
818 Requires a path (like /foo/.hg) ending with a foldable final
821 directory component.
819 directory component.
822 """
820 """
823 s1 = os.stat(path)
821 s1 = os.stat(path)
824 d, b = os.path.split(path)
822 d, b = os.path.split(path)
825 p2 = os.path.join(d, b.upper())
823 p2 = os.path.join(d, b.upper())
826 if path == p2:
824 if path == p2:
827 p2 = os.path.join(d, b.lower())
825 p2 = os.path.join(d, b.lower())
828 try:
826 try:
829 s2 = os.stat(p2)
827 s2 = os.stat(p2)
830 if s2 == s1:
828 if s2 == s1:
831 return False
829 return False
832 return True
830 return True
833 except:
831 except:
834 return True
832 return True
835
833
836 _fspathcache = {}
834 _fspathcache = {}
837 def fspath(name, root):
835 def fspath(name, root):
838 '''Get name in the case stored in the filesystem
836 '''Get name in the case stored in the filesystem
839
837
840 The name is either relative to root, or it is an absolute path starting
838 The name is either relative to root, or it is an absolute path starting
841 with root. Note that this function is unnecessary, and should not be
839 with root. Note that this function is unnecessary, and should not be
842 called, for case-sensitive filesystems (simply because it's expensive).
840 called, for case-sensitive filesystems (simply because it's expensive).
843 '''
841 '''
844 # If name is absolute, make it relative
842 # If name is absolute, make it relative
845 if name.lower().startswith(root.lower()):
843 if name.lower().startswith(root.lower()):
846 l = len(root)
844 l = len(root)
847 if name[l] == os.sep or name[l] == os.altsep:
845 if name[l] == os.sep or name[l] == os.altsep:
848 l = l + 1
846 l = l + 1
849 name = name[l:]
847 name = name[l:]
850
848
851 if not os.path.exists(os.path.join(root, name)):
849 if not os.path.exists(os.path.join(root, name)):
852 return None
850 return None
853
851
854 seps = os.sep
852 seps = os.sep
855 if os.altsep:
853 if os.altsep:
856 seps = seps + os.altsep
854 seps = seps + os.altsep
857 # Protect backslashes. This gets silly very quickly.
855 # Protect backslashes. This gets silly very quickly.
858 seps.replace('\\','\\\\')
856 seps.replace('\\','\\\\')
859 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
857 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
860 dir = os.path.normcase(os.path.normpath(root))
858 dir = os.path.normcase(os.path.normpath(root))
861 result = []
859 result = []
862 for part, sep in pattern.findall(name):
860 for part, sep in pattern.findall(name):
863 if sep:
861 if sep:
864 result.append(sep)
862 result.append(sep)
865 continue
863 continue
866
864
867 if dir not in _fspathcache:
865 if dir not in _fspathcache:
868 _fspathcache[dir] = os.listdir(dir)
866 _fspathcache[dir] = os.listdir(dir)
869 contents = _fspathcache[dir]
867 contents = _fspathcache[dir]
870
868
871 lpart = part.lower()
869 lpart = part.lower()
872 for n in contents:
870 for n in contents:
873 if n.lower() == lpart:
871 if n.lower() == lpart:
874 result.append(n)
872 result.append(n)
875 break
873 break
876 else:
874 else:
877 # Cannot happen, as the file exists!
875 # Cannot happen, as the file exists!
878 result.append(part)
876 result.append(part)
879 dir = os.path.join(dir, lpart)
877 dir = os.path.join(dir, lpart)
880
878
881 return ''.join(result)
879 return ''.join(result)
882
880
883 def checkexec(path):
881 def checkexec(path):
884 """
882 """
885 Check whether the given path is on a filesystem with UNIX-like exec flags
883 Check whether the given path is on a filesystem with UNIX-like exec flags
886
884
887 Requires a directory (like /foo/.hg)
885 Requires a directory (like /foo/.hg)
888 """
886 """
889
887
890 # VFAT on some Linux versions can flip mode but it doesn't persist
888 # VFAT on some Linux versions can flip mode but it doesn't persist
891 # a FS remount. Frequently we can detect it if files are created
889 # a FS remount. Frequently we can detect it if files are created
892 # with exec bit on.
890 # with exec bit on.
893
891
894 try:
892 try:
895 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
893 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
896 fh, fn = tempfile.mkstemp("", "", path)
894 fh, fn = tempfile.mkstemp("", "", path)
897 try:
895 try:
898 os.close(fh)
896 os.close(fh)
899 m = os.stat(fn).st_mode & 0777
897 m = os.stat(fn).st_mode & 0777
900 new_file_has_exec = m & EXECFLAGS
898 new_file_has_exec = m & EXECFLAGS
901 os.chmod(fn, m ^ EXECFLAGS)
899 os.chmod(fn, m ^ EXECFLAGS)
902 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
900 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
903 finally:
901 finally:
904 os.unlink(fn)
902 os.unlink(fn)
905 except (IOError, OSError):
903 except (IOError, OSError):
906 # we don't care, the user probably won't be able to commit anyway
904 # we don't care, the user probably won't be able to commit anyway
907 return False
905 return False
908 return not (new_file_has_exec or exec_flags_cannot_flip)
906 return not (new_file_has_exec or exec_flags_cannot_flip)
909
907
910 def checklink(path):
908 def checklink(path):
911 """check whether the given path is on a symlink-capable filesystem"""
909 """check whether the given path is on a symlink-capable filesystem"""
912 # mktemp is not racy because symlink creation will fail if the
910 # mktemp is not racy because symlink creation will fail if the
913 # file already exists
911 # file already exists
914 name = tempfile.mktemp(dir=path)
912 name = tempfile.mktemp(dir=path)
915 try:
913 try:
916 os.symlink(".", name)
914 os.symlink(".", name)
917 os.unlink(name)
915 os.unlink(name)
918 return True
916 return True
919 except (OSError, AttributeError):
917 except (OSError, AttributeError):
920 return False
918 return False
921
919
922 def needbinarypatch():
920 def needbinarypatch():
923 """return True if patches should be applied in binary mode by default."""
921 """return True if patches should be applied in binary mode by default."""
924 return os.name == 'nt'
922 return os.name == 'nt'
925
923
926 def endswithsep(path):
924 def endswithsep(path):
927 '''Check path ends with os.sep or os.altsep.'''
925 '''Check path ends with os.sep or os.altsep.'''
928 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
926 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
929
927
930 def splitpath(path):
928 def splitpath(path):
931 '''Split path by os.sep.
929 '''Split path by os.sep.
932 Note that this function does not use os.altsep because this is
930 Note that this function does not use os.altsep because this is
933 an alternative of simple "xxx.split(os.sep)".
931 an alternative of simple "xxx.split(os.sep)".
934 It is recommended to use os.path.normpath() before using this
932 It is recommended to use os.path.normpath() before using this
935 function if need.'''
933 function if need.'''
936 return path.split(os.sep)
934 return path.split(os.sep)
937
935
938 def gui():
936 def gui():
939 '''Are we running in a GUI?'''
937 '''Are we running in a GUI?'''
940 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
938 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
941
939
942 def mktempcopy(name, emptyok=False, createmode=None):
940 def mktempcopy(name, emptyok=False, createmode=None):
943 """Create a temporary file with the same contents from name
941 """Create a temporary file with the same contents from name
944
942
945 The permission bits are copied from the original file.
943 The permission bits are copied from the original file.
946
944
947 If the temporary file is going to be truncated immediately, you
945 If the temporary file is going to be truncated immediately, you
948 can use emptyok=True as an optimization.
946 can use emptyok=True as an optimization.
949
947
950 Returns the name of the temporary file.
948 Returns the name of the temporary file.
951 """
949 """
952 d, fn = os.path.split(name)
950 d, fn = os.path.split(name)
953 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
951 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
954 os.close(fd)
952 os.close(fd)
955 # Temporary files are created with mode 0600, which is usually not
953 # Temporary files are created with mode 0600, which is usually not
956 # what we want. If the original file already exists, just copy
954 # what we want. If the original file already exists, just copy
957 # its mode. Otherwise, manually obey umask.
955 # its mode. Otherwise, manually obey umask.
958 try:
956 try:
959 st_mode = os.lstat(name).st_mode & 0777
957 st_mode = os.lstat(name).st_mode & 0777
960 except OSError, inst:
958 except OSError, inst:
961 if inst.errno != errno.ENOENT:
959 if inst.errno != errno.ENOENT:
962 raise
960 raise
963 st_mode = createmode
961 st_mode = createmode
964 if st_mode is None:
962 if st_mode is None:
965 st_mode = ~umask
963 st_mode = ~umask
966 st_mode &= 0666
964 st_mode &= 0666
967 os.chmod(temp, st_mode)
965 os.chmod(temp, st_mode)
968 if emptyok:
966 if emptyok:
969 return temp
967 return temp
970 try:
968 try:
971 try:
969 try:
972 ifp = posixfile(name, "rb")
970 ifp = posixfile(name, "rb")
973 except IOError, inst:
971 except IOError, inst:
974 if inst.errno == errno.ENOENT:
972 if inst.errno == errno.ENOENT:
975 return temp
973 return temp
976 if not getattr(inst, 'filename', None):
974 if not getattr(inst, 'filename', None):
977 inst.filename = name
975 inst.filename = name
978 raise
976 raise
979 ofp = posixfile(temp, "wb")
977 ofp = posixfile(temp, "wb")
980 for chunk in filechunkiter(ifp):
978 for chunk in filechunkiter(ifp):
981 ofp.write(chunk)
979 ofp.write(chunk)
982 ifp.close()
980 ifp.close()
983 ofp.close()
981 ofp.close()
984 except:
982 except:
985 try: os.unlink(temp)
983 try: os.unlink(temp)
986 except: pass
984 except: pass
987 raise
985 raise
988 return temp
986 return temp
989
987
990 class atomictempfile(posixfile):
988 class atomictempfile(posixfile):
991 """file-like object that atomically updates a file
989 """file-like object that atomically updates a file
992
990
993 All writes will be redirected to a temporary copy of the original
991 All writes will be redirected to a temporary copy of the original
994 file. When rename is called, the copy is renamed to the original
992 file. When rename is called, the copy is renamed to the original
995 name, making the changes visible.
993 name, making the changes visible.
996 """
994 """
997 def __init__(self, name, mode, createmode):
995 def __init__(self, name, mode, createmode):
998 self.__name = name
996 self.__name = name
999 self.temp = mktempcopy(name, emptyok=('w' in mode),
997 self.temp = mktempcopy(name, emptyok=('w' in mode),
1000 createmode=createmode)
998 createmode=createmode)
1001 posixfile.__init__(self, self.temp, mode)
999 posixfile.__init__(self, self.temp, mode)
1002
1000
1003 def rename(self):
1001 def rename(self):
1004 if not self.closed:
1002 if not self.closed:
1005 posixfile.close(self)
1003 posixfile.close(self)
1006 rename(self.temp, localpath(self.__name))
1004 rename(self.temp, localpath(self.__name))
1007
1005
1008 def __del__(self):
1006 def __del__(self):
1009 if not self.closed:
1007 if not self.closed:
1010 try:
1008 try:
1011 os.unlink(self.temp)
1009 os.unlink(self.temp)
1012 except: pass
1010 except: pass
1013 posixfile.close(self)
1011 posixfile.close(self)
1014
1012
1015 def makedirs(name, mode=None):
1013 def makedirs(name, mode=None):
1016 """recursive directory creation with parent mode inheritance"""
1014 """recursive directory creation with parent mode inheritance"""
1017 try:
1015 try:
1018 os.mkdir(name)
1016 os.mkdir(name)
1019 if mode is not None:
1017 if mode is not None:
1020 os.chmod(name, mode)
1018 os.chmod(name, mode)
1021 return
1019 return
1022 except OSError, err:
1020 except OSError, err:
1023 if err.errno == errno.EEXIST:
1021 if err.errno == errno.EEXIST:
1024 return
1022 return
1025 if err.errno != errno.ENOENT:
1023 if err.errno != errno.ENOENT:
1026 raise
1024 raise
1027 parent = os.path.abspath(os.path.dirname(name))
1025 parent = os.path.abspath(os.path.dirname(name))
1028 makedirs(parent, mode)
1026 makedirs(parent, mode)
1029 makedirs(name, mode)
1027 makedirs(name, mode)
1030
1028
1031 class opener(object):
1029 class opener(object):
1032 """Open files relative to a base directory
1030 """Open files relative to a base directory
1033
1031
1034 This class is used to hide the details of COW semantics and
1032 This class is used to hide the details of COW semantics and
1035 remote file access from higher level code.
1033 remote file access from higher level code.
1036 """
1034 """
1037 def __init__(self, base, audit=True):
1035 def __init__(self, base, audit=True):
1038 self.base = base
1036 self.base = base
1039 if audit:
1037 if audit:
1040 self.audit_path = path_auditor(base)
1038 self.audit_path = path_auditor(base)
1041 else:
1039 else:
1042 self.audit_path = always
1040 self.audit_path = always
1043 self.createmode = None
1041 self.createmode = None
1044
1042
1045 def __getattr__(self, name):
1043 def __getattr__(self, name):
1046 if name == '_can_symlink':
1044 if name == '_can_symlink':
1047 self._can_symlink = checklink(self.base)
1045 self._can_symlink = checklink(self.base)
1048 return self._can_symlink
1046 return self._can_symlink
1049 raise AttributeError(name)
1047 raise AttributeError(name)
1050
1048
1051 def _fixfilemode(self, name):
1049 def _fixfilemode(self, name):
1052 if self.createmode is None:
1050 if self.createmode is None:
1053 return
1051 return
1054 os.chmod(name, self.createmode & 0666)
1052 os.chmod(name, self.createmode & 0666)
1055
1053
1056 def __call__(self, path, mode="r", text=False, atomictemp=False):
1054 def __call__(self, path, mode="r", text=False, atomictemp=False):
1057 self.audit_path(path)
1055 self.audit_path(path)
1058 f = os.path.join(self.base, path)
1056 f = os.path.join(self.base, path)
1059
1057
1060 if not text and "b" not in mode:
1058 if not text and "b" not in mode:
1061 mode += "b" # for that other OS
1059 mode += "b" # for that other OS
1062
1060
1063 nlink = -1
1061 nlink = -1
1064 if mode not in ("r", "rb"):
1062 if mode not in ("r", "rb"):
1065 try:
1063 try:
1066 nlink = nlinks(f)
1064 nlink = nlinks(f)
1067 except OSError:
1065 except OSError:
1068 nlink = 0
1066 nlink = 0
1069 d = os.path.dirname(f)
1067 d = os.path.dirname(f)
1070 if not os.path.isdir(d):
1068 if not os.path.isdir(d):
1071 makedirs(d, self.createmode)
1069 makedirs(d, self.createmode)
1072 if atomictemp:
1070 if atomictemp:
1073 return atomictempfile(f, mode, self.createmode)
1071 return atomictempfile(f, mode, self.createmode)
1074 if nlink > 1:
1072 if nlink > 1:
1075 rename(mktempcopy(f), f)
1073 rename(mktempcopy(f), f)
1076 fp = posixfile(f, mode)
1074 fp = posixfile(f, mode)
1077 if nlink == 0:
1075 if nlink == 0:
1078 self._fixfilemode(f)
1076 self._fixfilemode(f)
1079 return fp
1077 return fp
1080
1078
1081 def symlink(self, src, dst):
1079 def symlink(self, src, dst):
1082 self.audit_path(dst)
1080 self.audit_path(dst)
1083 linkname = os.path.join(self.base, dst)
1081 linkname = os.path.join(self.base, dst)
1084 try:
1082 try:
1085 os.unlink(linkname)
1083 os.unlink(linkname)
1086 except OSError:
1084 except OSError:
1087 pass
1085 pass
1088
1086
1089 dirname = os.path.dirname(linkname)
1087 dirname = os.path.dirname(linkname)
1090 if not os.path.exists(dirname):
1088 if not os.path.exists(dirname):
1091 makedirs(dirname, self.createmode)
1089 makedirs(dirname, self.createmode)
1092
1090
1093 if self._can_symlink:
1091 if self._can_symlink:
1094 try:
1092 try:
1095 os.symlink(src, linkname)
1093 os.symlink(src, linkname)
1096 except OSError, err:
1094 except OSError, err:
1097 raise OSError(err.errno, _('could not symlink to %r: %s') %
1095 raise OSError(err.errno, _('could not symlink to %r: %s') %
1098 (src, err.strerror), linkname)
1096 (src, err.strerror), linkname)
1099 else:
1097 else:
1100 f = self(dst, "w")
1098 f = self(dst, "w")
1101 f.write(src)
1099 f.write(src)
1102 f.close()
1100 f.close()
1103 self._fixfilemode(dst)
1101 self._fixfilemode(dst)
1104
1102
1105 class chunkbuffer(object):
1103 class chunkbuffer(object):
1106 """Allow arbitrary sized chunks of data to be efficiently read from an
1104 """Allow arbitrary sized chunks of data to be efficiently read from an
1107 iterator over chunks of arbitrary size."""
1105 iterator over chunks of arbitrary size."""
1108
1106
1109 def __init__(self, in_iter):
1107 def __init__(self, in_iter):
1110 """in_iter is the iterator that's iterating over the input chunks.
1108 """in_iter is the iterator that's iterating over the input chunks.
1111 targetsize is how big a buffer to try to maintain."""
1109 targetsize is how big a buffer to try to maintain."""
1112 self.iter = iter(in_iter)
1110 self.iter = iter(in_iter)
1113 self.buf = ''
1111 self.buf = ''
1114 self.targetsize = 2**16
1112 self.targetsize = 2**16
1115
1113
1116 def read(self, l):
1114 def read(self, l):
1117 """Read L bytes of data from the iterator of chunks of data.
1115 """Read L bytes of data from the iterator of chunks of data.
1118 Returns less than L bytes if the iterator runs dry."""
1116 Returns less than L bytes if the iterator runs dry."""
1119 if l > len(self.buf) and self.iter:
1117 if l > len(self.buf) and self.iter:
1120 # Clamp to a multiple of self.targetsize
1118 # Clamp to a multiple of self.targetsize
1121 targetsize = max(l, self.targetsize)
1119 targetsize = max(l, self.targetsize)
1122 collector = cStringIO.StringIO()
1120 collector = cStringIO.StringIO()
1123 collector.write(self.buf)
1121 collector.write(self.buf)
1124 collected = len(self.buf)
1122 collected = len(self.buf)
1125 for chunk in self.iter:
1123 for chunk in self.iter:
1126 collector.write(chunk)
1124 collector.write(chunk)
1127 collected += len(chunk)
1125 collected += len(chunk)
1128 if collected >= targetsize:
1126 if collected >= targetsize:
1129 break
1127 break
1130 if collected < targetsize:
1128 if collected < targetsize:
1131 self.iter = False
1129 self.iter = False
1132 self.buf = collector.getvalue()
1130 self.buf = collector.getvalue()
1133 if len(self.buf) == l:
1131 if len(self.buf) == l:
1134 s, self.buf = str(self.buf), ''
1132 s, self.buf = str(self.buf), ''
1135 else:
1133 else:
1136 s, self.buf = self.buf[:l], buffer(self.buf, l)
1134 s, self.buf = self.buf[:l], buffer(self.buf, l)
1137 return s
1135 return s
1138
1136
1139 def filechunkiter(f, size=65536, limit=None):
1137 def filechunkiter(f, size=65536, limit=None):
1140 """Create a generator that produces the data in the file size
1138 """Create a generator that produces the data in the file size
1141 (default 65536) bytes at a time, up to optional limit (default is
1139 (default 65536) bytes at a time, up to optional limit (default is
1142 to read all data). Chunks may be less than size bytes if the
1140 to read all data). Chunks may be less than size bytes if the
1143 chunk is the last chunk in the file, or the file is a socket or
1141 chunk is the last chunk in the file, or the file is a socket or
1144 some other type of file that sometimes reads less data than is
1142 some other type of file that sometimes reads less data than is
1145 requested."""
1143 requested."""
1146 assert size >= 0
1144 assert size >= 0
1147 assert limit is None or limit >= 0
1145 assert limit is None or limit >= 0
1148 while True:
1146 while True:
1149 if limit is None: nbytes = size
1147 if limit is None: nbytes = size
1150 else: nbytes = min(limit, size)
1148 else: nbytes = min(limit, size)
1151 s = nbytes and f.read(nbytes)
1149 s = nbytes and f.read(nbytes)
1152 if not s: break
1150 if not s: break
1153 if limit: limit -= len(s)
1151 if limit: limit -= len(s)
1154 yield s
1152 yield s
1155
1153
1156 def makedate():
1154 def makedate():
1157 lt = time.localtime()
1155 lt = time.localtime()
1158 if lt[8] == 1 and time.daylight:
1156 if lt[8] == 1 and time.daylight:
1159 tz = time.altzone
1157 tz = time.altzone
1160 else:
1158 else:
1161 tz = time.timezone
1159 tz = time.timezone
1162 return time.mktime(lt), tz
1160 return time.mktime(lt), tz
1163
1161
1164 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1162 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1165 """represent a (unixtime, offset) tuple as a localized time.
1163 """represent a (unixtime, offset) tuple as a localized time.
1166 unixtime is seconds since the epoch, and offset is the time zone's
1164 unixtime is seconds since the epoch, and offset is the time zone's
1167 number of seconds away from UTC. if timezone is false, do not
1165 number of seconds away from UTC. if timezone is false, do not
1168 append time zone to string."""
1166 append time zone to string."""
1169 t, tz = date or makedate()
1167 t, tz = date or makedate()
1170 if "%1" in format or "%2" in format:
1168 if "%1" in format or "%2" in format:
1171 sign = (tz > 0) and "-" or "+"
1169 sign = (tz > 0) and "-" or "+"
1172 minutes = abs(tz) / 60
1170 minutes = abs(tz) / 60
1173 format = format.replace("%1", "%c%02d" % (sign, minutes / 60))
1171 format = format.replace("%1", "%c%02d" % (sign, minutes / 60))
1174 format = format.replace("%2", "%02d" % (minutes % 60))
1172 format = format.replace("%2", "%02d" % (minutes % 60))
1175 s = time.strftime(format, time.gmtime(float(t) - tz))
1173 s = time.strftime(format, time.gmtime(float(t) - tz))
1176 return s
1174 return s
1177
1175
1178 def shortdate(date=None):
1176 def shortdate(date=None):
1179 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1177 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1180 return datestr(date, format='%Y-%m-%d')
1178 return datestr(date, format='%Y-%m-%d')
1181
1179
1182 def strdate(string, format, defaults=[]):
1180 def strdate(string, format, defaults=[]):
1183 """parse a localized time string and return a (unixtime, offset) tuple.
1181 """parse a localized time string and return a (unixtime, offset) tuple.
1184 if the string cannot be parsed, ValueError is raised."""
1182 if the string cannot be parsed, ValueError is raised."""
1185 def timezone(string):
1183 def timezone(string):
1186 tz = string.split()[-1]
1184 tz = string.split()[-1]
1187 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1185 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1188 sign = (tz[0] == "+") and 1 or -1
1186 sign = (tz[0] == "+") and 1 or -1
1189 hours = int(tz[1:3])
1187 hours = int(tz[1:3])
1190 minutes = int(tz[3:5])
1188 minutes = int(tz[3:5])
1191 return -sign * (hours * 60 + minutes) * 60
1189 return -sign * (hours * 60 + minutes) * 60
1192 if tz == "GMT" or tz == "UTC":
1190 if tz == "GMT" or tz == "UTC":
1193 return 0
1191 return 0
1194 return None
1192 return None
1195
1193
1196 # NOTE: unixtime = localunixtime + offset
1194 # NOTE: unixtime = localunixtime + offset
1197 offset, date = timezone(string), string
1195 offset, date = timezone(string), string
1198 if offset != None:
1196 if offset != None:
1199 date = " ".join(string.split()[:-1])
1197 date = " ".join(string.split()[:-1])
1200
1198
1201 # add missing elements from defaults
1199 # add missing elements from defaults
1202 for part in defaults:
1200 for part in defaults:
1203 found = [True for p in part if ("%"+p) in format]
1201 found = [True for p in part if ("%"+p) in format]
1204 if not found:
1202 if not found:
1205 date += "@" + defaults[part]
1203 date += "@" + defaults[part]
1206 format += "@%" + part[0]
1204 format += "@%" + part[0]
1207
1205
1208 timetuple = time.strptime(date, format)
1206 timetuple = time.strptime(date, format)
1209 localunixtime = int(calendar.timegm(timetuple))
1207 localunixtime = int(calendar.timegm(timetuple))
1210 if offset is None:
1208 if offset is None:
1211 # local timezone
1209 # local timezone
1212 unixtime = int(time.mktime(timetuple))
1210 unixtime = int(time.mktime(timetuple))
1213 offset = unixtime - localunixtime
1211 offset = unixtime - localunixtime
1214 else:
1212 else:
1215 unixtime = localunixtime + offset
1213 unixtime = localunixtime + offset
1216 return unixtime, offset
1214 return unixtime, offset
1217
1215
1218 def parsedate(date, formats=None, defaults=None):
1216 def parsedate(date, formats=None, defaults=None):
1219 """parse a localized date/time string and return a (unixtime, offset) tuple.
1217 """parse a localized date/time string and return a (unixtime, offset) tuple.
1220
1218
1221 The date may be a "unixtime offset" string or in one of the specified
1219 The date may be a "unixtime offset" string or in one of the specified
1222 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1220 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1223 """
1221 """
1224 if not date:
1222 if not date:
1225 return 0, 0
1223 return 0, 0
1226 if isinstance(date, tuple) and len(date) == 2:
1224 if isinstance(date, tuple) and len(date) == 2:
1227 return date
1225 return date
1228 if not formats:
1226 if not formats:
1229 formats = defaultdateformats
1227 formats = defaultdateformats
1230 date = date.strip()
1228 date = date.strip()
1231 try:
1229 try:
1232 when, offset = map(int, date.split(' '))
1230 when, offset = map(int, date.split(' '))
1233 except ValueError:
1231 except ValueError:
1234 # fill out defaults
1232 # fill out defaults
1235 if not defaults:
1233 if not defaults:
1236 defaults = {}
1234 defaults = {}
1237 now = makedate()
1235 now = makedate()
1238 for part in "d mb yY HI M S".split():
1236 for part in "d mb yY HI M S".split():
1239 if part not in defaults:
1237 if part not in defaults:
1240 if part[0] in "HMS":
1238 if part[0] in "HMS":
1241 defaults[part] = "00"
1239 defaults[part] = "00"
1242 else:
1240 else:
1243 defaults[part] = datestr(now, "%" + part[0])
1241 defaults[part] = datestr(now, "%" + part[0])
1244
1242
1245 for format in formats:
1243 for format in formats:
1246 try:
1244 try:
1247 when, offset = strdate(date, format, defaults)
1245 when, offset = strdate(date, format, defaults)
1248 except (ValueError, OverflowError):
1246 except (ValueError, OverflowError):
1249 pass
1247 pass
1250 else:
1248 else:
1251 break
1249 break
1252 else:
1250 else:
1253 raise Abort(_('invalid date: %r ') % date)
1251 raise Abort(_('invalid date: %r ') % date)
1254 # validate explicit (probably user-specified) date and
1252 # validate explicit (probably user-specified) date and
1255 # time zone offset. values must fit in signed 32 bits for
1253 # time zone offset. values must fit in signed 32 bits for
1256 # current 32-bit linux runtimes. timezones go from UTC-12
1254 # current 32-bit linux runtimes. timezones go from UTC-12
1257 # to UTC+14
1255 # to UTC+14
1258 if abs(when) > 0x7fffffff:
1256 if abs(when) > 0x7fffffff:
1259 raise Abort(_('date exceeds 32 bits: %d') % when)
1257 raise Abort(_('date exceeds 32 bits: %d') % when)
1260 if offset < -50400 or offset > 43200:
1258 if offset < -50400 or offset > 43200:
1261 raise Abort(_('impossible time zone offset: %d') % offset)
1259 raise Abort(_('impossible time zone offset: %d') % offset)
1262 return when, offset
1260 return when, offset
1263
1261
1264 def matchdate(date):
1262 def matchdate(date):
1265 """Return a function that matches a given date match specifier
1263 """Return a function that matches a given date match specifier
1266
1264
1267 Formats include:
1265 Formats include:
1268
1266
1269 '{date}' match a given date to the accuracy provided
1267 '{date}' match a given date to the accuracy provided
1270
1268
1271 '<{date}' on or before a given date
1269 '<{date}' on or before a given date
1272
1270
1273 '>{date}' on or after a given date
1271 '>{date}' on or after a given date
1274
1272
1275 """
1273 """
1276
1274
1277 def lower(date):
1275 def lower(date):
1278 d = dict(mb="1", d="1")
1276 d = dict(mb="1", d="1")
1279 return parsedate(date, extendeddateformats, d)[0]
1277 return parsedate(date, extendeddateformats, d)[0]
1280
1278
1281 def upper(date):
1279 def upper(date):
1282 d = dict(mb="12", HI="23", M="59", S="59")
1280 d = dict(mb="12", HI="23", M="59", S="59")
1283 for days in "31 30 29".split():
1281 for days in "31 30 29".split():
1284 try:
1282 try:
1285 d["d"] = days
1283 d["d"] = days
1286 return parsedate(date, extendeddateformats, d)[0]
1284 return parsedate(date, extendeddateformats, d)[0]
1287 except:
1285 except:
1288 pass
1286 pass
1289 d["d"] = "28"
1287 d["d"] = "28"
1290 return parsedate(date, extendeddateformats, d)[0]
1288 return parsedate(date, extendeddateformats, d)[0]
1291
1289
1292 date = date.strip()
1290 date = date.strip()
1293 if date[0] == "<":
1291 if date[0] == "<":
1294 when = upper(date[1:])
1292 when = upper(date[1:])
1295 return lambda x: x <= when
1293 return lambda x: x <= when
1296 elif date[0] == ">":
1294 elif date[0] == ">":
1297 when = lower(date[1:])
1295 when = lower(date[1:])
1298 return lambda x: x >= when
1296 return lambda x: x >= when
1299 elif date[0] == "-":
1297 elif date[0] == "-":
1300 try:
1298 try:
1301 days = int(date[1:])
1299 days = int(date[1:])
1302 except ValueError:
1300 except ValueError:
1303 raise Abort(_("invalid day spec: %s") % date[1:])
1301 raise Abort(_("invalid day spec: %s") % date[1:])
1304 when = makedate()[0] - days * 3600 * 24
1302 when = makedate()[0] - days * 3600 * 24
1305 return lambda x: x >= when
1303 return lambda x: x >= when
1306 elif " to " in date:
1304 elif " to " in date:
1307 a, b = date.split(" to ")
1305 a, b = date.split(" to ")
1308 start, stop = lower(a), upper(b)
1306 start, stop = lower(a), upper(b)
1309 return lambda x: x >= start and x <= stop
1307 return lambda x: x >= start and x <= stop
1310 else:
1308 else:
1311 start, stop = lower(date), upper(date)
1309 start, stop = lower(date), upper(date)
1312 return lambda x: x >= start and x <= stop
1310 return lambda x: x >= start and x <= stop
1313
1311
1314 def shortuser(user):
1312 def shortuser(user):
1315 """Return a short representation of a user name or email address."""
1313 """Return a short representation of a user name or email address."""
1316 f = user.find('@')
1314 f = user.find('@')
1317 if f >= 0:
1315 if f >= 0:
1318 user = user[:f]
1316 user = user[:f]
1319 f = user.find('<')
1317 f = user.find('<')
1320 if f >= 0:
1318 if f >= 0:
1321 user = user[f+1:]
1319 user = user[f+1:]
1322 f = user.find(' ')
1320 f = user.find(' ')
1323 if f >= 0:
1321 if f >= 0:
1324 user = user[:f]
1322 user = user[:f]
1325 f = user.find('.')
1323 f = user.find('.')
1326 if f >= 0:
1324 if f >= 0:
1327 user = user[:f]
1325 user = user[:f]
1328 return user
1326 return user
1329
1327
1330 def email(author):
1328 def email(author):
1331 '''get email of author.'''
1329 '''get email of author.'''
1332 r = author.find('>')
1330 r = author.find('>')
1333 if r == -1: r = None
1331 if r == -1: r = None
1334 return author[author.find('<')+1:r]
1332 return author[author.find('<')+1:r]
1335
1333
1336 def ellipsis(text, maxlength=400):
1334 def ellipsis(text, maxlength=400):
1337 """Trim string to at most maxlength (default: 400) characters."""
1335 """Trim string to at most maxlength (default: 400) characters."""
1338 if len(text) <= maxlength:
1336 if len(text) <= maxlength:
1339 return text
1337 return text
1340 else:
1338 else:
1341 return "%s..." % (text[:maxlength-3])
1339 return "%s..." % (text[:maxlength-3])
1342
1340
1343 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1341 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1344 '''yield every hg repository under path, recursively.'''
1342 '''yield every hg repository under path, recursively.'''
1345 def errhandler(err):
1343 def errhandler(err):
1346 if err.filename == path:
1344 if err.filename == path:
1347 raise err
1345 raise err
1348 if followsym and hasattr(os.path, 'samestat'):
1346 if followsym and hasattr(os.path, 'samestat'):
1349 def _add_dir_if_not_there(dirlst, dirname):
1347 def _add_dir_if_not_there(dirlst, dirname):
1350 match = False
1348 match = False
1351 samestat = os.path.samestat
1349 samestat = os.path.samestat
1352 dirstat = os.stat(dirname)
1350 dirstat = os.stat(dirname)
1353 for lstdirstat in dirlst:
1351 for lstdirstat in dirlst:
1354 if samestat(dirstat, lstdirstat):
1352 if samestat(dirstat, lstdirstat):
1355 match = True
1353 match = True
1356 break
1354 break
1357 if not match:
1355 if not match:
1358 dirlst.append(dirstat)
1356 dirlst.append(dirstat)
1359 return not match
1357 return not match
1360 else:
1358 else:
1361 followsym = False
1359 followsym = False
1362
1360
1363 if (seen_dirs is None) and followsym:
1361 if (seen_dirs is None) and followsym:
1364 seen_dirs = []
1362 seen_dirs = []
1365 _add_dir_if_not_there(seen_dirs, path)
1363 _add_dir_if_not_there(seen_dirs, path)
1366 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1364 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1367 if '.hg' in dirs:
1365 if '.hg' in dirs:
1368 yield root # found a repository
1366 yield root # found a repository
1369 qroot = os.path.join(root, '.hg', 'patches')
1367 qroot = os.path.join(root, '.hg', 'patches')
1370 if os.path.isdir(os.path.join(qroot, '.hg')):
1368 if os.path.isdir(os.path.join(qroot, '.hg')):
1371 yield qroot # we have a patch queue repo here
1369 yield qroot # we have a patch queue repo here
1372 if recurse:
1370 if recurse:
1373 # avoid recursing inside the .hg directory
1371 # avoid recursing inside the .hg directory
1374 dirs.remove('.hg')
1372 dirs.remove('.hg')
1375 else:
1373 else:
1376 dirs[:] = [] # don't descend further
1374 dirs[:] = [] # don't descend further
1377 elif followsym:
1375 elif followsym:
1378 newdirs = []
1376 newdirs = []
1379 for d in dirs:
1377 for d in dirs:
1380 fname = os.path.join(root, d)
1378 fname = os.path.join(root, d)
1381 if _add_dir_if_not_there(seen_dirs, fname):
1379 if _add_dir_if_not_there(seen_dirs, fname):
1382 if os.path.islink(fname):
1380 if os.path.islink(fname):
1383 for hgname in walkrepos(fname, True, seen_dirs):
1381 for hgname in walkrepos(fname, True, seen_dirs):
1384 yield hgname
1382 yield hgname
1385 else:
1383 else:
1386 newdirs.append(d)
1384 newdirs.append(d)
1387 dirs[:] = newdirs
1385 dirs[:] = newdirs
1388
1386
1389 _rcpath = None
1387 _rcpath = None
1390
1388
1391 def os_rcpath():
1389 def os_rcpath():
1392 '''return default os-specific hgrc search path'''
1390 '''return default os-specific hgrc search path'''
1393 path = system_rcpath()
1391 path = system_rcpath()
1394 path.extend(user_rcpath())
1392 path.extend(user_rcpath())
1395 path = [os.path.normpath(f) for f in path]
1393 path = [os.path.normpath(f) for f in path]
1396 return path
1394 return path
1397
1395
1398 def rcpath():
1396 def rcpath():
1399 '''return hgrc search path. if env var HGRCPATH is set, use it.
1397 '''return hgrc search path. if env var HGRCPATH is set, use it.
1400 for each item in path, if directory, use files ending in .rc,
1398 for each item in path, if directory, use files ending in .rc,
1401 else use item.
1399 else use item.
1402 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1400 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1403 if no HGRCPATH, use default os-specific path.'''
1401 if no HGRCPATH, use default os-specific path.'''
1404 global _rcpath
1402 global _rcpath
1405 if _rcpath is None:
1403 if _rcpath is None:
1406 if 'HGRCPATH' in os.environ:
1404 if 'HGRCPATH' in os.environ:
1407 _rcpath = []
1405 _rcpath = []
1408 for p in os.environ['HGRCPATH'].split(os.pathsep):
1406 for p in os.environ['HGRCPATH'].split(os.pathsep):
1409 if not p: continue
1407 if not p: continue
1410 if os.path.isdir(p):
1408 if os.path.isdir(p):
1411 for f, kind in osutil.listdir(p):
1409 for f, kind in osutil.listdir(p):
1412 if f.endswith('.rc'):
1410 if f.endswith('.rc'):
1413 _rcpath.append(os.path.join(p, f))
1411 _rcpath.append(os.path.join(p, f))
1414 else:
1412 else:
1415 _rcpath.append(p)
1413 _rcpath.append(p)
1416 else:
1414 else:
1417 _rcpath = os_rcpath()
1415 _rcpath = os_rcpath()
1418 return _rcpath
1416 return _rcpath
1419
1417
1420 def bytecount(nbytes):
1418 def bytecount(nbytes):
1421 '''return byte count formatted as readable string, with units'''
1419 '''return byte count formatted as readable string, with units'''
1422
1420
1423 units = (
1421 units = (
1424 (100, 1<<30, _('%.0f GB')),
1422 (100, 1<<30, _('%.0f GB')),
1425 (10, 1<<30, _('%.1f GB')),
1423 (10, 1<<30, _('%.1f GB')),
1426 (1, 1<<30, _('%.2f GB')),
1424 (1, 1<<30, _('%.2f GB')),
1427 (100, 1<<20, _('%.0f MB')),
1425 (100, 1<<20, _('%.0f MB')),
1428 (10, 1<<20, _('%.1f MB')),
1426 (10, 1<<20, _('%.1f MB')),
1429 (1, 1<<20, _('%.2f MB')),
1427 (1, 1<<20, _('%.2f MB')),
1430 (100, 1<<10, _('%.0f KB')),
1428 (100, 1<<10, _('%.0f KB')),
1431 (10, 1<<10, _('%.1f KB')),
1429 (10, 1<<10, _('%.1f KB')),
1432 (1, 1<<10, _('%.2f KB')),
1430 (1, 1<<10, _('%.2f KB')),
1433 (1, 1, _('%.0f bytes')),
1431 (1, 1, _('%.0f bytes')),
1434 )
1432 )
1435
1433
1436 for multiplier, divisor, format in units:
1434 for multiplier, divisor, format in units:
1437 if nbytes >= divisor * multiplier:
1435 if nbytes >= divisor * multiplier:
1438 return format % (nbytes / float(divisor))
1436 return format % (nbytes / float(divisor))
1439 return units[-1][2] % nbytes
1437 return units[-1][2] % nbytes
1440
1438
1441 def drop_scheme(scheme, path):
1439 def drop_scheme(scheme, path):
1442 sc = scheme + ':'
1440 sc = scheme + ':'
1443 if path.startswith(sc):
1441 if path.startswith(sc):
1444 path = path[len(sc):]
1442 path = path[len(sc):]
1445 if path.startswith('//'):
1443 if path.startswith('//'):
1446 path = path[2:]
1444 path = path[2:]
1447 return path
1445 return path
1448
1446
1449 def uirepr(s):
1447 def uirepr(s):
1450 # Avoid double backslash in Windows path repr()
1448 # Avoid double backslash in Windows path repr()
1451 return repr(s).replace('\\\\', '\\')
1449 return repr(s).replace('\\\\', '\\')
1452
1450
1453 def termwidth():
1451 def termwidth():
1454 if 'COLUMNS' in os.environ:
1452 if 'COLUMNS' in os.environ:
1455 try:
1453 try:
1456 return int(os.environ['COLUMNS'])
1454 return int(os.environ['COLUMNS'])
1457 except ValueError:
1455 except ValueError:
1458 pass
1456 pass
1459 try:
1457 try:
1460 import termios, array, fcntl
1458 import termios, array, fcntl
1461 for dev in (sys.stdout, sys.stdin):
1459 for dev in (sys.stdout, sys.stdin):
1462 try:
1460 try:
1463 fd = dev.fileno()
1461 fd = dev.fileno()
1464 if not os.isatty(fd):
1462 if not os.isatty(fd):
1465 continue
1463 continue
1466 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1464 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1467 return array.array('h', arri)[1]
1465 return array.array('h', arri)[1]
1468 except ValueError:
1466 except ValueError:
1469 pass
1467 pass
1470 except ImportError:
1468 except ImportError:
1471 pass
1469 pass
1472 return 80
1470 return 80
1473
1471
1474 def iterlines(iterator):
1472 def iterlines(iterator):
1475 for chunk in iterator:
1473 for chunk in iterator:
1476 for line in chunk.splitlines():
1474 for line in chunk.splitlines():
1477 yield line
1475 yield line
@@ -1,379 +1,377 b''
1 '''
1 # win32.py - utility functions that use win32 API
2 win32.py - utility functions that use win32 API
2 #
3
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
5
5 # This software may be used and distributed according to the terms of the
6 This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7 GNU General Public License version 2, incorporated herein by reference.
7 #
8
8 # Mark Hammond's win32all package allows better functionality on
9 Mark Hammond's win32all package allows better functionality on
9 # Windows. this module overrides definitions in util.py. if not
10 Windows. this module overrides definitions in util.py. if not
10 # available, import of this module will fail, and generic code will be
11 available, import of this module will fail, and generic code will be
11 # used.
12 used.
13 '''
14
12
15 import win32api
13 import win32api
16
14
17 import errno, os, sys, pywintypes, win32con, win32file, win32process
15 import errno, os, sys, pywintypes, win32con, win32file, win32process
18 import cStringIO, winerror
16 import cStringIO, winerror
19 import osutil, encoding
17 import osutil, encoding
20 import util
18 import util
21 from win32com.shell import shell,shellcon
19 from win32com.shell import shell,shellcon
22
20
23 class WinError(Exception):
21 class WinError(Exception):
24 winerror_map = {
22 winerror_map = {
25 winerror.ERROR_ACCESS_DENIED: errno.EACCES,
23 winerror.ERROR_ACCESS_DENIED: errno.EACCES,
26 winerror.ERROR_ACCOUNT_DISABLED: errno.EACCES,
24 winerror.ERROR_ACCOUNT_DISABLED: errno.EACCES,
27 winerror.ERROR_ACCOUNT_RESTRICTION: errno.EACCES,
25 winerror.ERROR_ACCOUNT_RESTRICTION: errno.EACCES,
28 winerror.ERROR_ALREADY_ASSIGNED: errno.EBUSY,
26 winerror.ERROR_ALREADY_ASSIGNED: errno.EBUSY,
29 winerror.ERROR_ALREADY_EXISTS: errno.EEXIST,
27 winerror.ERROR_ALREADY_EXISTS: errno.EEXIST,
30 winerror.ERROR_ARITHMETIC_OVERFLOW: errno.ERANGE,
28 winerror.ERROR_ARITHMETIC_OVERFLOW: errno.ERANGE,
31 winerror.ERROR_BAD_COMMAND: errno.EIO,
29 winerror.ERROR_BAD_COMMAND: errno.EIO,
32 winerror.ERROR_BAD_DEVICE: errno.ENODEV,
30 winerror.ERROR_BAD_DEVICE: errno.ENODEV,
33 winerror.ERROR_BAD_DRIVER_LEVEL: errno.ENXIO,
31 winerror.ERROR_BAD_DRIVER_LEVEL: errno.ENXIO,
34 winerror.ERROR_BAD_EXE_FORMAT: errno.ENOEXEC,
32 winerror.ERROR_BAD_EXE_FORMAT: errno.ENOEXEC,
35 winerror.ERROR_BAD_FORMAT: errno.ENOEXEC,
33 winerror.ERROR_BAD_FORMAT: errno.ENOEXEC,
36 winerror.ERROR_BAD_LENGTH: errno.EINVAL,
34 winerror.ERROR_BAD_LENGTH: errno.EINVAL,
37 winerror.ERROR_BAD_PATHNAME: errno.ENOENT,
35 winerror.ERROR_BAD_PATHNAME: errno.ENOENT,
38 winerror.ERROR_BAD_PIPE: errno.EPIPE,
36 winerror.ERROR_BAD_PIPE: errno.EPIPE,
39 winerror.ERROR_BAD_UNIT: errno.ENODEV,
37 winerror.ERROR_BAD_UNIT: errno.ENODEV,
40 winerror.ERROR_BAD_USERNAME: errno.EINVAL,
38 winerror.ERROR_BAD_USERNAME: errno.EINVAL,
41 winerror.ERROR_BROKEN_PIPE: errno.EPIPE,
39 winerror.ERROR_BROKEN_PIPE: errno.EPIPE,
42 winerror.ERROR_BUFFER_OVERFLOW: errno.ENAMETOOLONG,
40 winerror.ERROR_BUFFER_OVERFLOW: errno.ENAMETOOLONG,
43 winerror.ERROR_BUSY: errno.EBUSY,
41 winerror.ERROR_BUSY: errno.EBUSY,
44 winerror.ERROR_BUSY_DRIVE: errno.EBUSY,
42 winerror.ERROR_BUSY_DRIVE: errno.EBUSY,
45 winerror.ERROR_CALL_NOT_IMPLEMENTED: errno.ENOSYS,
43 winerror.ERROR_CALL_NOT_IMPLEMENTED: errno.ENOSYS,
46 winerror.ERROR_CANNOT_MAKE: errno.EACCES,
44 winerror.ERROR_CANNOT_MAKE: errno.EACCES,
47 winerror.ERROR_CANTOPEN: errno.EIO,
45 winerror.ERROR_CANTOPEN: errno.EIO,
48 winerror.ERROR_CANTREAD: errno.EIO,
46 winerror.ERROR_CANTREAD: errno.EIO,
49 winerror.ERROR_CANTWRITE: errno.EIO,
47 winerror.ERROR_CANTWRITE: errno.EIO,
50 winerror.ERROR_CRC: errno.EIO,
48 winerror.ERROR_CRC: errno.EIO,
51 winerror.ERROR_CURRENT_DIRECTORY: errno.EACCES,
49 winerror.ERROR_CURRENT_DIRECTORY: errno.EACCES,
52 winerror.ERROR_DEVICE_IN_USE: errno.EBUSY,
50 winerror.ERROR_DEVICE_IN_USE: errno.EBUSY,
53 winerror.ERROR_DEV_NOT_EXIST: errno.ENODEV,
51 winerror.ERROR_DEV_NOT_EXIST: errno.ENODEV,
54 winerror.ERROR_DIRECTORY: errno.EINVAL,
52 winerror.ERROR_DIRECTORY: errno.EINVAL,
55 winerror.ERROR_DIR_NOT_EMPTY: errno.ENOTEMPTY,
53 winerror.ERROR_DIR_NOT_EMPTY: errno.ENOTEMPTY,
56 winerror.ERROR_DISK_CHANGE: errno.EIO,
54 winerror.ERROR_DISK_CHANGE: errno.EIO,
57 winerror.ERROR_DISK_FULL: errno.ENOSPC,
55 winerror.ERROR_DISK_FULL: errno.ENOSPC,
58 winerror.ERROR_DRIVE_LOCKED: errno.EBUSY,
56 winerror.ERROR_DRIVE_LOCKED: errno.EBUSY,
59 winerror.ERROR_ENVVAR_NOT_FOUND: errno.EINVAL,
57 winerror.ERROR_ENVVAR_NOT_FOUND: errno.EINVAL,
60 winerror.ERROR_EXE_MARKED_INVALID: errno.ENOEXEC,
58 winerror.ERROR_EXE_MARKED_INVALID: errno.ENOEXEC,
61 winerror.ERROR_FILENAME_EXCED_RANGE: errno.ENAMETOOLONG,
59 winerror.ERROR_FILENAME_EXCED_RANGE: errno.ENAMETOOLONG,
62 winerror.ERROR_FILE_EXISTS: errno.EEXIST,
60 winerror.ERROR_FILE_EXISTS: errno.EEXIST,
63 winerror.ERROR_FILE_INVALID: errno.ENODEV,
61 winerror.ERROR_FILE_INVALID: errno.ENODEV,
64 winerror.ERROR_FILE_NOT_FOUND: errno.ENOENT,
62 winerror.ERROR_FILE_NOT_FOUND: errno.ENOENT,
65 winerror.ERROR_GEN_FAILURE: errno.EIO,
63 winerror.ERROR_GEN_FAILURE: errno.EIO,
66 winerror.ERROR_HANDLE_DISK_FULL: errno.ENOSPC,
64 winerror.ERROR_HANDLE_DISK_FULL: errno.ENOSPC,
67 winerror.ERROR_INSUFFICIENT_BUFFER: errno.ENOMEM,
65 winerror.ERROR_INSUFFICIENT_BUFFER: errno.ENOMEM,
68 winerror.ERROR_INVALID_ACCESS: errno.EACCES,
66 winerror.ERROR_INVALID_ACCESS: errno.EACCES,
69 winerror.ERROR_INVALID_ADDRESS: errno.EFAULT,
67 winerror.ERROR_INVALID_ADDRESS: errno.EFAULT,
70 winerror.ERROR_INVALID_BLOCK: errno.EFAULT,
68 winerror.ERROR_INVALID_BLOCK: errno.EFAULT,
71 winerror.ERROR_INVALID_DATA: errno.EINVAL,
69 winerror.ERROR_INVALID_DATA: errno.EINVAL,
72 winerror.ERROR_INVALID_DRIVE: errno.ENODEV,
70 winerror.ERROR_INVALID_DRIVE: errno.ENODEV,
73 winerror.ERROR_INVALID_EXE_SIGNATURE: errno.ENOEXEC,
71 winerror.ERROR_INVALID_EXE_SIGNATURE: errno.ENOEXEC,
74 winerror.ERROR_INVALID_FLAGS: errno.EINVAL,
72 winerror.ERROR_INVALID_FLAGS: errno.EINVAL,
75 winerror.ERROR_INVALID_FUNCTION: errno.ENOSYS,
73 winerror.ERROR_INVALID_FUNCTION: errno.ENOSYS,
76 winerror.ERROR_INVALID_HANDLE: errno.EBADF,
74 winerror.ERROR_INVALID_HANDLE: errno.EBADF,
77 winerror.ERROR_INVALID_LOGON_HOURS: errno.EACCES,
75 winerror.ERROR_INVALID_LOGON_HOURS: errno.EACCES,
78 winerror.ERROR_INVALID_NAME: errno.EINVAL,
76 winerror.ERROR_INVALID_NAME: errno.EINVAL,
79 winerror.ERROR_INVALID_OWNER: errno.EINVAL,
77 winerror.ERROR_INVALID_OWNER: errno.EINVAL,
80 winerror.ERROR_INVALID_PARAMETER: errno.EINVAL,
78 winerror.ERROR_INVALID_PARAMETER: errno.EINVAL,
81 winerror.ERROR_INVALID_PASSWORD: errno.EPERM,
79 winerror.ERROR_INVALID_PASSWORD: errno.EPERM,
82 winerror.ERROR_INVALID_PRIMARY_GROUP: errno.EINVAL,
80 winerror.ERROR_INVALID_PRIMARY_GROUP: errno.EINVAL,
83 winerror.ERROR_INVALID_SIGNAL_NUMBER: errno.EINVAL,
81 winerror.ERROR_INVALID_SIGNAL_NUMBER: errno.EINVAL,
84 winerror.ERROR_INVALID_TARGET_HANDLE: errno.EIO,
82 winerror.ERROR_INVALID_TARGET_HANDLE: errno.EIO,
85 winerror.ERROR_INVALID_WORKSTATION: errno.EACCES,
83 winerror.ERROR_INVALID_WORKSTATION: errno.EACCES,
86 winerror.ERROR_IO_DEVICE: errno.EIO,
84 winerror.ERROR_IO_DEVICE: errno.EIO,
87 winerror.ERROR_IO_INCOMPLETE: errno.EINTR,
85 winerror.ERROR_IO_INCOMPLETE: errno.EINTR,
88 winerror.ERROR_LOCKED: errno.EBUSY,
86 winerror.ERROR_LOCKED: errno.EBUSY,
89 winerror.ERROR_LOCK_VIOLATION: errno.EACCES,
87 winerror.ERROR_LOCK_VIOLATION: errno.EACCES,
90 winerror.ERROR_LOGON_FAILURE: errno.EACCES,
88 winerror.ERROR_LOGON_FAILURE: errno.EACCES,
91 winerror.ERROR_MAPPED_ALIGNMENT: errno.EINVAL,
89 winerror.ERROR_MAPPED_ALIGNMENT: errno.EINVAL,
92 winerror.ERROR_META_EXPANSION_TOO_LONG: errno.E2BIG,
90 winerror.ERROR_META_EXPANSION_TOO_LONG: errno.E2BIG,
93 winerror.ERROR_MORE_DATA: errno.EPIPE,
91 winerror.ERROR_MORE_DATA: errno.EPIPE,
94 winerror.ERROR_NEGATIVE_SEEK: errno.ESPIPE,
92 winerror.ERROR_NEGATIVE_SEEK: errno.ESPIPE,
95 winerror.ERROR_NOACCESS: errno.EFAULT,
93 winerror.ERROR_NOACCESS: errno.EFAULT,
96 winerror.ERROR_NONE_MAPPED: errno.EINVAL,
94 winerror.ERROR_NONE_MAPPED: errno.EINVAL,
97 winerror.ERROR_NOT_ENOUGH_MEMORY: errno.ENOMEM,
95 winerror.ERROR_NOT_ENOUGH_MEMORY: errno.ENOMEM,
98 winerror.ERROR_NOT_READY: errno.EAGAIN,
96 winerror.ERROR_NOT_READY: errno.EAGAIN,
99 winerror.ERROR_NOT_SAME_DEVICE: errno.EXDEV,
97 winerror.ERROR_NOT_SAME_DEVICE: errno.EXDEV,
100 winerror.ERROR_NO_DATA: errno.EPIPE,
98 winerror.ERROR_NO_DATA: errno.EPIPE,
101 winerror.ERROR_NO_MORE_SEARCH_HANDLES: errno.EIO,
99 winerror.ERROR_NO_MORE_SEARCH_HANDLES: errno.EIO,
102 winerror.ERROR_NO_PROC_SLOTS: errno.EAGAIN,
100 winerror.ERROR_NO_PROC_SLOTS: errno.EAGAIN,
103 winerror.ERROR_NO_SUCH_PRIVILEGE: errno.EACCES,
101 winerror.ERROR_NO_SUCH_PRIVILEGE: errno.EACCES,
104 winerror.ERROR_OPEN_FAILED: errno.EIO,
102 winerror.ERROR_OPEN_FAILED: errno.EIO,
105 winerror.ERROR_OPEN_FILES: errno.EBUSY,
103 winerror.ERROR_OPEN_FILES: errno.EBUSY,
106 winerror.ERROR_OPERATION_ABORTED: errno.EINTR,
104 winerror.ERROR_OPERATION_ABORTED: errno.EINTR,
107 winerror.ERROR_OUTOFMEMORY: errno.ENOMEM,
105 winerror.ERROR_OUTOFMEMORY: errno.ENOMEM,
108 winerror.ERROR_PASSWORD_EXPIRED: errno.EACCES,
106 winerror.ERROR_PASSWORD_EXPIRED: errno.EACCES,
109 winerror.ERROR_PATH_BUSY: errno.EBUSY,
107 winerror.ERROR_PATH_BUSY: errno.EBUSY,
110 winerror.ERROR_PATH_NOT_FOUND: errno.ENOENT,
108 winerror.ERROR_PATH_NOT_FOUND: errno.ENOENT,
111 winerror.ERROR_PIPE_BUSY: errno.EBUSY,
109 winerror.ERROR_PIPE_BUSY: errno.EBUSY,
112 winerror.ERROR_PIPE_CONNECTED: errno.EPIPE,
110 winerror.ERROR_PIPE_CONNECTED: errno.EPIPE,
113 winerror.ERROR_PIPE_LISTENING: errno.EPIPE,
111 winerror.ERROR_PIPE_LISTENING: errno.EPIPE,
114 winerror.ERROR_PIPE_NOT_CONNECTED: errno.EPIPE,
112 winerror.ERROR_PIPE_NOT_CONNECTED: errno.EPIPE,
115 winerror.ERROR_PRIVILEGE_NOT_HELD: errno.EACCES,
113 winerror.ERROR_PRIVILEGE_NOT_HELD: errno.EACCES,
116 winerror.ERROR_READ_FAULT: errno.EIO,
114 winerror.ERROR_READ_FAULT: errno.EIO,
117 winerror.ERROR_SEEK: errno.EIO,
115 winerror.ERROR_SEEK: errno.EIO,
118 winerror.ERROR_SEEK_ON_DEVICE: errno.ESPIPE,
116 winerror.ERROR_SEEK_ON_DEVICE: errno.ESPIPE,
119 winerror.ERROR_SHARING_BUFFER_EXCEEDED: errno.ENFILE,
117 winerror.ERROR_SHARING_BUFFER_EXCEEDED: errno.ENFILE,
120 winerror.ERROR_SHARING_VIOLATION: errno.EACCES,
118 winerror.ERROR_SHARING_VIOLATION: errno.EACCES,
121 winerror.ERROR_STACK_OVERFLOW: errno.ENOMEM,
119 winerror.ERROR_STACK_OVERFLOW: errno.ENOMEM,
122 winerror.ERROR_SWAPERROR: errno.ENOENT,
120 winerror.ERROR_SWAPERROR: errno.ENOENT,
123 winerror.ERROR_TOO_MANY_MODULES: errno.EMFILE,
121 winerror.ERROR_TOO_MANY_MODULES: errno.EMFILE,
124 winerror.ERROR_TOO_MANY_OPEN_FILES: errno.EMFILE,
122 winerror.ERROR_TOO_MANY_OPEN_FILES: errno.EMFILE,
125 winerror.ERROR_UNRECOGNIZED_MEDIA: errno.ENXIO,
123 winerror.ERROR_UNRECOGNIZED_MEDIA: errno.ENXIO,
126 winerror.ERROR_UNRECOGNIZED_VOLUME: errno.ENODEV,
124 winerror.ERROR_UNRECOGNIZED_VOLUME: errno.ENODEV,
127 winerror.ERROR_WAIT_NO_CHILDREN: errno.ECHILD,
125 winerror.ERROR_WAIT_NO_CHILDREN: errno.ECHILD,
128 winerror.ERROR_WRITE_FAULT: errno.EIO,
126 winerror.ERROR_WRITE_FAULT: errno.EIO,
129 winerror.ERROR_WRITE_PROTECT: errno.EROFS,
127 winerror.ERROR_WRITE_PROTECT: errno.EROFS,
130 }
128 }
131
129
132 def __init__(self, err):
130 def __init__(self, err):
133 self.win_errno, self.win_function, self.win_strerror = err
131 self.win_errno, self.win_function, self.win_strerror = err
134 if self.win_strerror.endswith('.'):
132 if self.win_strerror.endswith('.'):
135 self.win_strerror = self.win_strerror[:-1]
133 self.win_strerror = self.win_strerror[:-1]
136
134
137 class WinIOError(WinError, IOError):
135 class WinIOError(WinError, IOError):
138 def __init__(self, err, filename=None):
136 def __init__(self, err, filename=None):
139 WinError.__init__(self, err)
137 WinError.__init__(self, err)
140 IOError.__init__(self, self.winerror_map.get(self.win_errno, 0),
138 IOError.__init__(self, self.winerror_map.get(self.win_errno, 0),
141 self.win_strerror)
139 self.win_strerror)
142 self.filename = filename
140 self.filename = filename
143
141
144 class WinOSError(WinError, OSError):
142 class WinOSError(WinError, OSError):
145 def __init__(self, err):
143 def __init__(self, err):
146 WinError.__init__(self, err)
144 WinError.__init__(self, err)
147 OSError.__init__(self, self.winerror_map.get(self.win_errno, 0),
145 OSError.__init__(self, self.winerror_map.get(self.win_errno, 0),
148 self.win_strerror)
146 self.win_strerror)
149
147
150 def os_link(src, dst):
148 def os_link(src, dst):
151 try:
149 try:
152 win32file.CreateHardLink(dst, src)
150 win32file.CreateHardLink(dst, src)
153 # CreateHardLink sometimes succeeds on mapped drives but
151 # CreateHardLink sometimes succeeds on mapped drives but
154 # following nlinks() returns 1. Check it now and bail out.
152 # following nlinks() returns 1. Check it now and bail out.
155 if nlinks(src) < 2:
153 if nlinks(src) < 2:
156 try:
154 try:
157 win32file.DeleteFile(dst)
155 win32file.DeleteFile(dst)
158 except:
156 except:
159 pass
157 pass
160 # Fake hardlinking error
158 # Fake hardlinking error
161 raise WinOSError((18, 'CreateHardLink', 'The system cannot '
159 raise WinOSError((18, 'CreateHardLink', 'The system cannot '
162 'move the file to a different disk drive'))
160 'move the file to a different disk drive'))
163 except pywintypes.error, details:
161 except pywintypes.error, details:
164 raise WinOSError(details)
162 raise WinOSError(details)
165 except NotImplementedError: # Another fake error win Win98
163 except NotImplementedError: # Another fake error win Win98
166 raise WinOSError((18, 'CreateHardLink', 'Hardlinking not supported'))
164 raise WinOSError((18, 'CreateHardLink', 'Hardlinking not supported'))
167
165
168 def nlinks(pathname):
166 def nlinks(pathname):
169 """Return number of hardlinks for the given file."""
167 """Return number of hardlinks for the given file."""
170 try:
168 try:
171 fh = win32file.CreateFile(pathname,
169 fh = win32file.CreateFile(pathname,
172 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
170 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
173 None, win32file.OPEN_EXISTING, 0, None)
171 None, win32file.OPEN_EXISTING, 0, None)
174 res = win32file.GetFileInformationByHandle(fh)
172 res = win32file.GetFileInformationByHandle(fh)
175 fh.Close()
173 fh.Close()
176 return res[7]
174 return res[7]
177 except pywintypes.error:
175 except pywintypes.error:
178 return os.lstat(pathname).st_nlink
176 return os.lstat(pathname).st_nlink
179
177
180 def testpid(pid):
178 def testpid(pid):
181 '''return True if pid is still running or unable to
179 '''return True if pid is still running or unable to
182 determine, False otherwise'''
180 determine, False otherwise'''
183 try:
181 try:
184 handle = win32api.OpenProcess(
182 handle = win32api.OpenProcess(
185 win32con.PROCESS_QUERY_INFORMATION, False, pid)
183 win32con.PROCESS_QUERY_INFORMATION, False, pid)
186 if handle:
184 if handle:
187 status = win32process.GetExitCodeProcess(handle)
185 status = win32process.GetExitCodeProcess(handle)
188 return status == win32con.STILL_ACTIVE
186 return status == win32con.STILL_ACTIVE
189 except pywintypes.error, details:
187 except pywintypes.error, details:
190 return details[0] != winerror.ERROR_INVALID_PARAMETER
188 return details[0] != winerror.ERROR_INVALID_PARAMETER
191 return True
189 return True
192
190
193 def lookup_reg(key, valname=None, scope=None):
191 def lookup_reg(key, valname=None, scope=None):
194 ''' Look up a key/value name in the Windows registry.
192 ''' Look up a key/value name in the Windows registry.
195
193
196 valname: value name. If unspecified, the default value for the key
194 valname: value name. If unspecified, the default value for the key
197 is used.
195 is used.
198 scope: optionally specify scope for registry lookup, this can be
196 scope: optionally specify scope for registry lookup, this can be
199 a sequence of scopes to look up in order. Default (CURRENT_USER,
197 a sequence of scopes to look up in order. Default (CURRENT_USER,
200 LOCAL_MACHINE).
198 LOCAL_MACHINE).
201 '''
199 '''
202 try:
200 try:
203 from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, \
201 from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, \
204 QueryValueEx, OpenKey
202 QueryValueEx, OpenKey
205 except ImportError:
203 except ImportError:
206 return None
204 return None
207
205
208 if scope is None:
206 if scope is None:
209 scope = (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE)
207 scope = (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE)
210 elif not isinstance(scope, (list, tuple)):
208 elif not isinstance(scope, (list, tuple)):
211 scope = (scope,)
209 scope = (scope,)
212 for s in scope:
210 for s in scope:
213 try:
211 try:
214 val = QueryValueEx(OpenKey(s, key), valname)[0]
212 val = QueryValueEx(OpenKey(s, key), valname)[0]
215 # never let a Unicode string escape into the wild
213 # never let a Unicode string escape into the wild
216 return encoding.tolocal(val.encode('UTF-8'))
214 return encoding.tolocal(val.encode('UTF-8'))
217 except EnvironmentError:
215 except EnvironmentError:
218 pass
216 pass
219
217
220 def system_rcpath_win32():
218 def system_rcpath_win32():
221 '''return default os-specific hgrc search path'''
219 '''return default os-specific hgrc search path'''
222 proc = win32api.GetCurrentProcess()
220 proc = win32api.GetCurrentProcess()
223 try:
221 try:
224 # This will fail on windows < NT
222 # This will fail on windows < NT
225 filename = win32process.GetModuleFileNameEx(proc, 0)
223 filename = win32process.GetModuleFileNameEx(proc, 0)
226 except:
224 except:
227 filename = win32api.GetModuleFileName(0)
225 filename = win32api.GetModuleFileName(0)
228 # Use mercurial.ini found in directory with hg.exe
226 # Use mercurial.ini found in directory with hg.exe
229 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
227 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
230 if os.path.isfile(progrc):
228 if os.path.isfile(progrc):
231 return [progrc]
229 return [progrc]
232 # else look for a system rcpath in the registry
230 # else look for a system rcpath in the registry
233 try:
231 try:
234 value = win32api.RegQueryValue(
232 value = win32api.RegQueryValue(
235 win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Mercurial')
233 win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Mercurial')
236 rcpath = []
234 rcpath = []
237 for p in value.split(os.pathsep):
235 for p in value.split(os.pathsep):
238 if p.lower().endswith('mercurial.ini'):
236 if p.lower().endswith('mercurial.ini'):
239 rcpath.append(p)
237 rcpath.append(p)
240 elif os.path.isdir(p):
238 elif os.path.isdir(p):
241 for f, kind in osutil.listdir(p):
239 for f, kind in osutil.listdir(p):
242 if f.endswith('.rc'):
240 if f.endswith('.rc'):
243 rcpath.append(os.path.join(p, f))
241 rcpath.append(os.path.join(p, f))
244 return rcpath
242 return rcpath
245 except pywintypes.error:
243 except pywintypes.error:
246 return []
244 return []
247
245
248 def user_rcpath_win32():
246 def user_rcpath_win32():
249 '''return os-specific hgrc search path to the user dir'''
247 '''return os-specific hgrc search path to the user dir'''
250 userdir = os.path.expanduser('~')
248 userdir = os.path.expanduser('~')
251 if sys.getwindowsversion()[3] != 2 and userdir == '~':
249 if sys.getwindowsversion()[3] != 2 and userdir == '~':
252 # We are on win < nt: fetch the APPDATA directory location and use
250 # We are on win < nt: fetch the APPDATA directory location and use
253 # the parent directory as the user home dir.
251 # the parent directory as the user home dir.
254 appdir = shell.SHGetPathFromIDList(
252 appdir = shell.SHGetPathFromIDList(
255 shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA))
253 shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA))
256 userdir = os.path.dirname(appdir)
254 userdir = os.path.dirname(appdir)
257 return [os.path.join(userdir, 'mercurial.ini'),
255 return [os.path.join(userdir, 'mercurial.ini'),
258 os.path.join(userdir, '.hgrc')]
256 os.path.join(userdir, '.hgrc')]
259
257
260 class posixfile_nt(object):
258 class posixfile_nt(object):
261 '''file object with posix-like semantics. on windows, normal
259 '''file object with posix-like semantics. on windows, normal
262 files can not be deleted or renamed if they are open. must open
260 files can not be deleted or renamed if they are open. must open
263 with win32file.FILE_SHARE_DELETE. this flag does not exist on
261 with win32file.FILE_SHARE_DELETE. this flag does not exist on
264 windows < nt, so do not use this class there.'''
262 windows < nt, so do not use this class there.'''
265
263
266 # tried to use win32file._open_osfhandle to pass fd to os.fdopen,
264 # tried to use win32file._open_osfhandle to pass fd to os.fdopen,
267 # but does not work at all. wrap win32 file api instead.
265 # but does not work at all. wrap win32 file api instead.
268
266
269 def __init__(self, name, mode='rb'):
267 def __init__(self, name, mode='rb'):
270 self.closed = False
268 self.closed = False
271 self.name = name
269 self.name = name
272 self.mode = mode
270 self.mode = mode
273 access = 0
271 access = 0
274 if 'r' in mode or '+' in mode:
272 if 'r' in mode or '+' in mode:
275 access |= win32file.GENERIC_READ
273 access |= win32file.GENERIC_READ
276 if 'w' in mode or 'a' in mode or '+' in mode:
274 if 'w' in mode or 'a' in mode or '+' in mode:
277 access |= win32file.GENERIC_WRITE
275 access |= win32file.GENERIC_WRITE
278 if 'r' in mode:
276 if 'r' in mode:
279 creation = win32file.OPEN_EXISTING
277 creation = win32file.OPEN_EXISTING
280 elif 'a' in mode:
278 elif 'a' in mode:
281 creation = win32file.OPEN_ALWAYS
279 creation = win32file.OPEN_ALWAYS
282 else:
280 else:
283 creation = win32file.CREATE_ALWAYS
281 creation = win32file.CREATE_ALWAYS
284 try:
282 try:
285 self.handle = win32file.CreateFile(name,
283 self.handle = win32file.CreateFile(name,
286 access,
284 access,
287 win32file.FILE_SHARE_READ |
285 win32file.FILE_SHARE_READ |
288 win32file.FILE_SHARE_WRITE |
286 win32file.FILE_SHARE_WRITE |
289 win32file.FILE_SHARE_DELETE,
287 win32file.FILE_SHARE_DELETE,
290 None,
288 None,
291 creation,
289 creation,
292 win32file.FILE_ATTRIBUTE_NORMAL,
290 win32file.FILE_ATTRIBUTE_NORMAL,
293 0)
291 0)
294 except pywintypes.error, err:
292 except pywintypes.error, err:
295 raise WinIOError(err, name)
293 raise WinIOError(err, name)
296
294
297 def __iter__(self):
295 def __iter__(self):
298 for line in self.readlines():
296 for line in self.readlines():
299 yield line
297 yield line
300
298
301 def read(self, count=-1):
299 def read(self, count=-1):
302 try:
300 try:
303 cs = cStringIO.StringIO()
301 cs = cStringIO.StringIO()
304 while count:
302 while count:
305 wincount = int(count)
303 wincount = int(count)
306 if wincount == -1:
304 if wincount == -1:
307 wincount = 1048576
305 wincount = 1048576
308 val, data = win32file.ReadFile(self.handle, wincount)
306 val, data = win32file.ReadFile(self.handle, wincount)
309 if not data: break
307 if not data: break
310 cs.write(data)
308 cs.write(data)
311 if count != -1:
309 if count != -1:
312 count -= len(data)
310 count -= len(data)
313 return cs.getvalue()
311 return cs.getvalue()
314 except pywintypes.error, err:
312 except pywintypes.error, err:
315 raise WinIOError(err)
313 raise WinIOError(err)
316
314
317 def readlines(self, sizehint=None):
315 def readlines(self, sizehint=None):
318 # splitlines() splits on single '\r' while readlines()
316 # splitlines() splits on single '\r' while readlines()
319 # does not. cStringIO has a well behaving readlines() and is fast.
317 # does not. cStringIO has a well behaving readlines() and is fast.
320 return cStringIO.StringIO(self.read()).readlines()
318 return cStringIO.StringIO(self.read()).readlines()
321
319
322 def write(self, data):
320 def write(self, data):
323 try:
321 try:
324 if 'a' in self.mode:
322 if 'a' in self.mode:
325 win32file.SetFilePointer(self.handle, 0, win32file.FILE_END)
323 win32file.SetFilePointer(self.handle, 0, win32file.FILE_END)
326 nwrit = 0
324 nwrit = 0
327 while nwrit < len(data):
325 while nwrit < len(data):
328 val, nwrit = win32file.WriteFile(self.handle, data)
326 val, nwrit = win32file.WriteFile(self.handle, data)
329 data = data[nwrit:]
327 data = data[nwrit:]
330 except pywintypes.error, err:
328 except pywintypes.error, err:
331 raise WinIOError(err)
329 raise WinIOError(err)
332
330
333 def writelines(self, sequence):
331 def writelines(self, sequence):
334 for s in sequence:
332 for s in sequence:
335 self.write(s)
333 self.write(s)
336
334
337 def seek(self, pos, whence=0):
335 def seek(self, pos, whence=0):
338 try:
336 try:
339 win32file.SetFilePointer(self.handle, int(pos), whence)
337 win32file.SetFilePointer(self.handle, int(pos), whence)
340 except pywintypes.error, err:
338 except pywintypes.error, err:
341 raise WinIOError(err)
339 raise WinIOError(err)
342
340
343 def tell(self):
341 def tell(self):
344 try:
342 try:
345 return win32file.SetFilePointer(self.handle, 0,
343 return win32file.SetFilePointer(self.handle, 0,
346 win32file.FILE_CURRENT)
344 win32file.FILE_CURRENT)
347 except pywintypes.error, err:
345 except pywintypes.error, err:
348 raise WinIOError(err)
346 raise WinIOError(err)
349
347
350 def close(self):
348 def close(self):
351 if not self.closed:
349 if not self.closed:
352 self.handle = None
350 self.handle = None
353 self.closed = True
351 self.closed = True
354
352
355 def flush(self):
353 def flush(self):
356 # we have no application-level buffering
354 # we have no application-level buffering
357 pass
355 pass
358
356
359 def truncate(self, pos=0):
357 def truncate(self, pos=0):
360 try:
358 try:
361 win32file.SetFilePointer(self.handle, int(pos),
359 win32file.SetFilePointer(self.handle, int(pos),
362 win32file.FILE_BEGIN)
360 win32file.FILE_BEGIN)
363 win32file.SetEndOfFile(self.handle)
361 win32file.SetEndOfFile(self.handle)
364 except pywintypes.error, err:
362 except pywintypes.error, err:
365 raise WinIOError(err)
363 raise WinIOError(err)
366
364
367 def getuser():
365 def getuser():
368 '''return name of current user'''
366 '''return name of current user'''
369 return win32api.GetUserName()
367 return win32api.GetUserName()
370
368
371 def set_signal_handler_win32():
369 def set_signal_handler_win32():
372 """Register a termination handler for console events including
370 """Register a termination handler for console events including
373 CTRL+C. python signal handlers do not work well with socket
371 CTRL+C. python signal handlers do not work well with socket
374 operations.
372 operations.
375 """
373 """
376 def handler(event):
374 def handler(event):
377 win32process.ExitProcess(1)
375 win32process.ExitProcess(1)
378 win32api.SetConsoleCtrlHandler(handler)
376 win32api.SetConsoleCtrlHandler(handler)
379
377
@@ -1,252 +1,250 b''
1 """
1 # windows.py - Windows utility function implementations for Mercurial
2 windows.py - Windows utility function implementations for Mercurial
2 #
3
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
5
5 # This software may be used and distributed according to the terms of the
6 This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7 GNU General Public License version 2, incorporated herein by reference.
8 """
9
7
10 from i18n import _
8 from i18n import _
11 import errno, msvcrt, os, osutil, re, sys, error
9 import errno, msvcrt, os, osutil, re, sys, error
# Name of the null device on Windows (used to discard subprocess stderr).
nulldev = 'NUL:'

# Default umask applied when creating files (Python 2 octal literal).
umask = 002
15
13
16 class winstdout:
14 class winstdout:
17 '''stdout on windows misbehaves if sent through a pipe'''
15 '''stdout on windows misbehaves if sent through a pipe'''
18
16
19 def __init__(self, fp):
17 def __init__(self, fp):
20 self.fp = fp
18 self.fp = fp
21
19
22 def __getattr__(self, key):
20 def __getattr__(self, key):
23 return getattr(self.fp, key)
21 return getattr(self.fp, key)
24
22
25 def close(self):
23 def close(self):
26 try:
24 try:
27 self.fp.close()
25 self.fp.close()
28 except: pass
26 except: pass
29
27
30 def write(self, s):
28 def write(self, s):
31 try:
29 try:
32 # This is workaround for "Not enough space" error on
30 # This is workaround for "Not enough space" error on
33 # writing large size of data to console.
31 # writing large size of data to console.
34 limit = 16000
32 limit = 16000
35 l = len(s)
33 l = len(s)
36 start = 0
34 start = 0
37 while start < l:
35 while start < l:
38 end = start + limit
36 end = start + limit
39 self.fp.write(s[start:end])
37 self.fp.write(s[start:end])
40 start = end
38 start = end
41 except IOError, inst:
39 except IOError, inst:
42 if inst.errno != 0: raise
40 if inst.errno != 0: raise
43 self.close()
41 self.close()
44 raise IOError(errno.EPIPE, 'Broken pipe')
42 raise IOError(errno.EPIPE, 'Broken pipe')
45
43
46 def flush(self):
44 def flush(self):
47 try:
45 try:
48 return self.fp.flush()
46 return self.fp.flush()
49 except IOError, inst:
47 except IOError, inst:
50 if inst.errno != errno.EINVAL: raise
48 if inst.errno != errno.EINVAL: raise
51 self.close()
49 self.close()
52 raise IOError(errno.EPIPE, 'Broken pipe')
50 raise IOError(errno.EPIPE, 'Broken pipe')
53
51
# Replace stdout with the pipe-safe wrapper so console writes don't crash.
sys.stdout = winstdout(sys.stdout)
55
53
56 def _is_win_9x():
54 def _is_win_9x():
57 '''return true if run on windows 95, 98 or me.'''
55 '''return true if run on windows 95, 98 or me.'''
58 try:
56 try:
59 return sys.getwindowsversion()[3] == 1
57 return sys.getwindowsversion()[3] == 1
60 except AttributeError:
58 except AttributeError:
61 return 'command' in os.environ.get('comspec', '')
59 return 'command' in os.environ.get('comspec', '')
62
60
def openhardlinks():
    """Return True if files may be held open without breaking hardlinks
    (NT-family Windows with the pywin32 helpers loaded)."""
    # Fix: the original evaluated the *function object* `_is_win_9x`
    # (always truthy, so `not ...` was always False) and looked up
    # "win32api" in locals(), which inside this function can never
    # contain it.  Call the function and consult the module globals.
    return not _is_win_9x() and "win32api" in globals()
65
63
def system_rcpath():
    """Return the list of system-wide hgrc file paths."""
    try:
        paths = system_rcpath_win32()
    except:
        # Best effort: if the registry-based lookup is unavailable or
        # fails for any reason, fall back to the historical default.
        paths = [r'c:\mercurial\mercurial.ini']
    return paths
71
69
def user_rcpath():
    """Return the os-specific hgrc search path in the user's directory."""
    try:
        rcs = user_rcpath_win32()
    except:
        # No win32 registry helpers available: use ~ as the user dir.
        home = os.path.expanduser('~')
        rcs = [os.path.join(home, 'mercurial.ini'),
               os.path.join(home, '.hgrc')]
    profile = os.environ.get('USERPROFILE')
    if profile:
        for name in ('mercurial.ini', '.hgrc'):
            rcs.append(os.path.join(profile, name))
    return rcs
85
83
def parse_patch_output(output_line):
    """Extract the file name from a line of ``patch`` output."""
    # The "patching file " prefix is 14 characters long.
    name = output_line[14:]
    if name[0] == '`':
        # patch wrapped the name in `...' quotes; strip them.
        name = name[1:-1]
    return name
92
90
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh or Plink'''
    # Plink spells the port option -P; OpenSSH uses -p.
    if 'plink' in sshcmd.lower():
        pflag = '-P'
    else:
        pflag = '-p'
    if user:
        target = "%s@%s" % (user, host)
    else:
        target = host
    if port:
        return "%s %s %s" % (target, pflag, port)
    return target
98
96
99 def testpid(pid):
97 def testpid(pid):
100 '''return False if pid dead, True if running or not known'''
98 '''return False if pid dead, True if running or not known'''
101 return True
99 return True
102
100
def set_flags(f, l, x):
    """No-op on this platform; flags are silently ignored."""
    return None
105
103
def set_binary(fd):
    """Put ``fd``'s descriptor into binary mode when it has a valid one."""
    # When run without a console, pipes may expose an invalid fileno(),
    # usually set to -1; skip those.
    if not hasattr(fd, 'fileno'):
        return
    no = fd.fileno()
    if no >= 0:
        msvcrt.setmode(no, os.O_BINARY)
111
109
def pconvert(path):
    """Return ``path`` with local separators converted to '/'."""
    return path.replace(os.sep, '/')
114
112
def localpath(path):
    """Return ``path`` with '/' separators replaced by backslashes."""
    return '\\'.join(path.split('/'))
117
115
def normpath(path):
    """Normalize ``path`` and return it with '/' separators."""
    normalized = os.path.normpath(path)
    return pconvert(normalized)
120
118
def samestat(s1, s2):
    """Stub: stat results can never be proven identical here."""
    return False
123
121
124 # A sequence of backslashes is special iff it precedes a double quote:
122 # A sequence of backslashes is special iff it precedes a double quote:
125 # - if there's an even number of backslashes, the double quote is not
123 # - if there's an even number of backslashes, the double quote is not
126 # quoted (i.e. it ends the quoted region)
124 # quoted (i.e. it ends the quoted region)
127 # - if there's an odd number of backslashes, the double quote is quoted
125 # - if there's an odd number of backslashes, the double quote is quoted
128 # - in both cases, every pair of backslashes is unquoted into a single
126 # - in both cases, every pair of backslashes is unquoted into a single
129 # backslash
127 # backslash
130 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
128 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
131 # So, to quote a string, we must surround it in double quotes, double
129 # So, to quote a string, we must surround it in double quotes, double
132 # the number of backslashes that preceed double quotes and add another
130 # the number of backslashes that preceed double quotes and add another
133 # backslash before every double quote (being careful with the double
131 # backslash before every double quote (being careful with the double
134 # quote we've appended to the end)
132 # quote we've appended to the end)
_quotere = None

def shellquote(s):
    """Quote ``s`` so cmd.exe passes it through as a single argument."""
    global _quotere
    # Compile the escaping regex lazily and cache it at module level.
    if _quotere is None:
        _quotere = re.compile(r'(\\*)("|\\$)')
    escaped = _quotere.sub(r'\1\1\\\2', s)
    return '"%s"' % escaped
141
139
def quotecommand(cmd):
    """Build a command string suitable for os.popen* calls."""
    # popen* runs the command through the current COMSPEC, and cmd.exe
    # strips one level of enclosing quotes -- so add an extra pair.
    return '"%s"' % cmd
147
145
def popen(command, mode='r'):
    """Run ``command`` through os.popen with stderr discarded."""
    # Work around "popen spawned process may not write to stdout
    # under windows" -- http://bugs.python.org/issue1366
    redirected = command + (" 2> %s" % nulldev)
    return os.popen(quotecommand(redirected), mode)
154
152
def explain_exit(code):
    """Return a (message, exitcode) pair describing exit status ``code``."""
    msg = _("exited with status %d") % code
    return msg, code
157
155
# if you change this stub into a real check, please try to implement the
# username and groupname functions above, too.
def isowner(fp, st=None):
    """Stub: assume the current user owns ``fp``."""
    return True
162
160
def find_exe(command):
    """Find an executable for ``command``, searching the way cmd.exe does.

    A bare name is searched for along PATH; a command containing a path
    separator is checked directly, skipping PATH.  An extension from
    PATHEXT is appended when the command does not already carry one.
    Returns the full file name, or None when nothing matches.
    """
    extlist = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
    candidates = extlist.lower().split(os.pathsep)
    if os.path.splitext(command)[1].lower() in candidates:
        # The command already ends in a recognized executable extension.
        candidates = ['']

    def _existing(stem):
        # Return the first stem+extension combination that names a file.
        for suffix in candidates:
            target = stem + suffix
            if os.path.exists(target):
                return target
        return None

    if os.sep in command:
        return _existing(command)

    for directory in os.environ.get('PATH', '').split(os.pathsep):
        found = _existing(os.path.join(directory, command))
        if found is not None:
            return found
    return None
190
188
def set_signal_handler():
    """Install the win32 console handler when the helpers are available."""
    try:
        set_signal_handler_win32()
    except NameError:
        # The pywin32-based helper was not imported; nothing to do.
        pass
196
194
197 def statfiles(files):
195 def statfiles(files):
198 '''Stat each file in files and yield stat or None if file does not exist.
196 '''Stat each file in files and yield stat or None if file does not exist.
199 Cluster and cache stat per directory to minimize number of OS stat calls.'''
197 Cluster and cache stat per directory to minimize number of OS stat calls.'''
200 ncase = os.path.normcase
198 ncase = os.path.normcase
201 sep = os.sep
199 sep = os.sep
202 dircache = {} # dirname -> filename -> status | None if file does not exist
200 dircache = {} # dirname -> filename -> status | None if file does not exist
203 for nf in files:
201 for nf in files:
204 nf = ncase(nf)
202 nf = ncase(nf)
205 pos = nf.rfind(sep)
203 pos = nf.rfind(sep)
206 if pos == -1:
204 if pos == -1:
207 dir, base = '.', nf
205 dir, base = '.', nf
208 else:
206 else:
209 dir, base = nf[:pos+1], nf[pos+1:]
207 dir, base = nf[:pos+1], nf[pos+1:]
210 cache = dircache.get(dir, None)
208 cache = dircache.get(dir, None)
211 if cache is None:
209 if cache is None:
212 try:
210 try:
213 dmap = dict([(ncase(n), s)
211 dmap = dict([(ncase(n), s)
214 for n, k, s in osutil.listdir(dir, True)])
212 for n, k, s in osutil.listdir(dir, True)])
215 except OSError, err:
213 except OSError, err:
216 # handle directory not found in Python version prior to 2.5
214 # handle directory not found in Python version prior to 2.5
217 # Python <= 2.4 returns native Windows code 3 in errno
215 # Python <= 2.4 returns native Windows code 3 in errno
218 # Python >= 2.5 returns ENOENT and adds winerror field
216 # Python >= 2.5 returns ENOENT and adds winerror field
219 # EINVAL is raised if dir is not a directory.
217 # EINVAL is raised if dir is not a directory.
220 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
218 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
221 errno.ENOTDIR):
219 errno.ENOTDIR):
222 raise
220 raise
223 dmap = {}
221 dmap = {}
224 cache = dircache.setdefault(dir, dmap)
222 cache = dircache.setdefault(dir, dmap)
225 yield cache.get(base, None)
223 yield cache.get(base, None)
226
224
def getuser():
    '''return name of current user'''
    # Without the win32 helpers there is no way to resolve the user name.
    msg = _('user name not available - set USERNAME '
            'environment variable')
    raise error.Abort(msg)
231
229
def username(uid=None):
    """Return the name of the user with the given uid.

    Stub: always returns None (uid of None means the current user)."""
    return None
237
235
def groupname(gid=None):
    """Return the name of the group with the given gid.

    Stub: always returns None (gid of None means the current group)."""
    return None
243
241
# Default to the builtin file object; when the pywin32-based helpers can
# be imported (NT-family Windows), swap in their replacements instead.
posixfile = file
try:
    from win32 import *
    if not _is_win_9x():
        posixfile = posixfile_nt
except ImportError:
    pass
252
250
General Comments 0
You need to be logged in to leave comments. Login now