fix warnings spotted by pychecker
Benoit Boissinot
r3131:cff3c58a default
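The visible change is a one-line cleanup: pychecker flags local variables that are assigned but never read, and do_between() in hgweb_mod.py bound nodes = [] without ever using it. (The httprepo.py hunk likewise drops a single line, past the end of the excerpt shown below.) A minimal standalone sketch of this warning class, assuming a Python 2 environment with pychecker installed; the function here is a hypothetical demo, not Mercurial code:

    # demo.py - the assigned-but-never-read pattern pychecker flags.
    def do_between(form):
        nodes = []  # dead binding: never read below; pychecker reports
                    # something like "Local variable (nodes) not used"
        if 'pairs' in form:
            return [p.split("-") for p in form['pairs'][0].split(" ")]
        return []

Deleting the dead binding, as this changeset does, silences the warning without changing behavior.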
--- a/mercurial/hgweb/hgweb_mod.py
+++ b/mercurial/hgweb/hgweb_mod.py
@@ -1,981 +1,980 @@
 # hgweb/hgweb_mod.py - Web interface for a repository.
 #
 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 import os
 import os.path
 import mimetypes
 from mercurial.demandload import demandload
 demandload(globals(), "re zlib ConfigParser mimetools cStringIO sys tempfile")
 demandload(globals(), "mercurial:mdiff,ui,hg,util,archival,streamclone,patch")
 demandload(globals(), "mercurial:templater")
 demandload(globals(), "mercurial.hgweb.common:get_mtime,staticfile")
 from mercurial.node import *
 from mercurial.i18n import gettext as _
 
 def _up(p):
     if p[0] != "/":
         p = "/" + p
     if p[-1] == "/":
         p = p[:-1]
     up = os.path.dirname(p)
     if up == "/":
         return "/"
     return up + "/"
 
 class hgweb(object):
     def __init__(self, repo, name=None):
         if type(repo) == type(""):
             self.repo = hg.repository(ui.ui(), repo)
         else:
             self.repo = repo
 
         self.mtime = -1
         self.reponame = name
         self.archives = 'zip', 'gz', 'bz2'
         self.stripecount = 1
         self.templatepath = self.repo.ui.config("web", "templates",
                                                 templater.templatepath())
 
     def refresh(self):
         mtime = get_mtime(self.repo.root)
         if mtime != self.mtime:
             self.mtime = mtime
             self.repo = hg.repository(self.repo.ui, self.repo.root)
         self.maxchanges = int(self.repo.ui.config("web", "maxchanges", 10))
         self.stripecount = int(self.repo.ui.config("web", "stripes", 1))
         self.maxshortchanges = int(self.repo.ui.config("web", "maxshortchanges", 60))
         self.maxfiles = int(self.repo.ui.config("web", "maxfiles", 10))
         self.allowpull = self.repo.ui.configbool("web", "allowpull", True)
 
     def archivelist(self, nodeid):
         allowed = self.repo.ui.configlist("web", "allow_archive")
         for i in self.archives:
             if i in allowed or self.repo.ui.configbool("web", "allow" + i):
                 yield {"type" : i, "node" : nodeid, "url": ""}
 
     def listfiles(self, files, mf):
         for f in files[:self.maxfiles]:
             yield self.t("filenodelink", node=hex(mf[f]), file=f)
         if len(files) > self.maxfiles:
             yield self.t("fileellipses")
 
     def listfilediffs(self, files, changeset):
         for f in files[:self.maxfiles]:
             yield self.t("filedifflink", node=hex(changeset), file=f)
         if len(files) > self.maxfiles:
             yield self.t("fileellipses")
 
     def siblings(self, siblings=[], rev=None, hiderev=None, **args):
         if not rev:
             rev = lambda x: ""
         siblings = [s for s in siblings if s != nullid]
         if len(siblings) == 1 and rev(siblings[0]) == hiderev:
             return
         for s in siblings:
             yield dict(node=hex(s), rev=rev(s), **args)
 
     def renamelink(self, fl, node):
         r = fl.renamed(node)
         if r:
             return [dict(file=r[0], node=hex(r[1]))]
         return []
 
     def showtag(self, t1, node=nullid, **args):
         for t in self.repo.nodetags(node):
             yield self.t(t1, tag=t, **args)
 
     def diff(self, node1, node2, files):
         def filterfiles(filters, files):
             l = [x for x in files if x in filters]
 
             for t in filters:
                 if t and t[-1] != os.sep:
                     t += os.sep
                 l += [x for x in files if x.startswith(t)]
             return l
 
         parity = [0]
         def diffblock(diff, f, fn):
             yield self.t("diffblock",
                          lines=prettyprintlines(diff),
                          parity=parity[0],
                          file=f,
                          filenode=hex(fn or nullid))
             parity[0] = 1 - parity[0]
 
         def prettyprintlines(diff):
             for l in diff.splitlines(1):
                 if l.startswith('+'):
                     yield self.t("difflineplus", line=l)
                 elif l.startswith('-'):
                     yield self.t("difflineminus", line=l)
                 elif l.startswith('@'):
                     yield self.t("difflineat", line=l)
                 else:
                     yield self.t("diffline", line=l)
 
         r = self.repo
         cl = r.changelog
         mf = r.manifest
         change1 = cl.read(node1)
         change2 = cl.read(node2)
         mmap1 = mf.read(change1[0])
         mmap2 = mf.read(change2[0])
         date1 = util.datestr(change1[2])
         date2 = util.datestr(change2[2])
 
         modified, added, removed, deleted, unknown = r.status(node1, node2)[:5]
         if files:
             modified, added, removed = map(lambda x: filterfiles(files, x),
                                            (modified, added, removed))
 
         diffopts = patch.diffopts(self.repo.ui)
         for f in modified:
             to = r.file(f).read(mmap1[f])
             tn = r.file(f).read(mmap2[f])
             yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
                                           opts=diffopts), f, tn)
         for f in added:
             to = None
             tn = r.file(f).read(mmap2[f])
             yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
                                           opts=diffopts), f, tn)
         for f in removed:
             to = r.file(f).read(mmap1[f])
             tn = None
             yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
                                           opts=diffopts), f, tn)
 
     def changelog(self, pos, shortlog=False):
         def changenav(**map):
             def seq(factor, maxchanges=None):
                 if maxchanges:
                     yield maxchanges
                     if maxchanges >= 20 and maxchanges <= 40:
                         yield 50
                 else:
                     yield 1 * factor
                     yield 3 * factor
                 for f in seq(factor * 10):
                     yield f
 
             l = []
             last = 0
             maxchanges = shortlog and self.maxshortchanges or self.maxchanges
             for f in seq(1, maxchanges):
                 if f < maxchanges or f <= last:
                     continue
                 if f > count:
                     break
                 last = f
                 r = "%d" % f
                 if pos + f < count:
                     l.append(("+" + r, pos + f))
                 if pos - f >= 0:
                     l.insert(0, ("-" + r, pos - f))
 
             yield {"rev": 0, "label": "(0)"}
 
             for label, rev in l:
                 yield {"label": label, "rev": rev}
 
             yield {"label": "tip", "rev": "tip"}
 
         def changelist(**map):
             parity = (start - end) & 1
             cl = self.repo.changelog
             l = [] # build a list in forward order for efficiency
             for i in range(start, end):
                 n = cl.node(i)
                 changes = cl.read(n)
                 hn = hex(n)
 
                 l.insert(0, {"parity": parity,
                              "author": changes[1],
                              "parent": self.siblings(cl.parents(n), cl.rev,
                                                      cl.rev(n) - 1),
                              "child": self.siblings(cl.children(n), cl.rev,
                                                     cl.rev(n) + 1),
                              "changelogtag": self.showtag("changelogtag",n),
                              "manifest": hex(changes[0]),
                              "desc": changes[4],
                              "date": changes[2],
                              "files": self.listfilediffs(changes[3], n),
                              "rev": i,
                              "node": hn})
                 parity = 1 - parity
 
             for e in l:
                 yield e
 
         maxchanges = shortlog and self.maxshortchanges or self.maxchanges
         cl = self.repo.changelog
         mf = cl.read(cl.tip())[0]
         count = cl.count()
         start = max(0, pos - maxchanges + 1)
         end = min(count, start + maxchanges)
         pos = end - 1
 
         yield self.t(shortlog and 'shortlog' or 'changelog',
                      changenav=changenav,
                      manifest=hex(mf),
                      rev=pos, changesets=count, entries=changelist,
                      archives=self.archivelist("tip"))
 
     def search(self, query):
 
         def changelist(**map):
             cl = self.repo.changelog
             count = 0
             qw = query.lower().split()
 
             def revgen():
                 for i in range(cl.count() - 1, 0, -100):
                     l = []
                     for j in range(max(0, i - 100), i):
                         n = cl.node(j)
                         changes = cl.read(n)
                         l.append((n, j, changes))
                     l.reverse()
                     for e in l:
                         yield e
 
             for n, i, changes in revgen():
                 miss = 0
                 for q in qw:
                     if not (q in changes[1].lower() or
                             q in changes[4].lower() or
                             q in " ".join(changes[3][:20]).lower()):
                         miss = 1
                         break
                 if miss:
                     continue
 
                 count += 1
                 hn = hex(n)
 
                 yield self.t('searchentry',
                              parity=self.stripes(count),
                              author=changes[1],
                              parent=self.siblings(cl.parents(n), cl.rev),
                              child=self.siblings(cl.children(n), cl.rev),
                              changelogtag=self.showtag("changelogtag",n),
                              manifest=hex(changes[0]),
                              desc=changes[4],
                              date=changes[2],
                              files=self.listfilediffs(changes[3], n),
                              rev=i,
                              node=hn)
 
                 if count >= self.maxchanges:
                     break
 
         cl = self.repo.changelog
         mf = cl.read(cl.tip())[0]
 
         yield self.t('search',
                      query=query,
                      manifest=hex(mf),
                      entries=changelist)
 
     def changeset(self, nodeid):
         cl = self.repo.changelog
         n = self.repo.lookup(nodeid)
         nodeid = hex(n)
         changes = cl.read(n)
         p1 = cl.parents(n)[0]
 
         files = []
         mf = self.repo.manifest.read(changes[0])
         for f in changes[3]:
             files.append(self.t("filenodelink",
                                 filenode=hex(mf.get(f, nullid)), file=f))
 
         def diff(**map):
             yield self.diff(p1, n, None)
 
         yield self.t('changeset',
                      diff=diff,
                      rev=cl.rev(n),
                      node=nodeid,
                      parent=self.siblings(cl.parents(n), cl.rev),
                      child=self.siblings(cl.children(n), cl.rev),
                      changesettag=self.showtag("changesettag",n),
                      manifest=hex(changes[0]),
                      author=changes[1],
                      desc=changes[4],
                      date=changes[2],
                      files=files,
                      archives=self.archivelist(nodeid))
 
     def filelog(self, f, filenode):
         cl = self.repo.changelog
         fl = self.repo.file(f)
         filenode = hex(fl.lookup(filenode))
         count = fl.count()
 
         def entries(**map):
             l = []
             parity = (count - 1) & 1
 
             for i in range(count):
                 n = fl.node(i)
                 lr = fl.linkrev(n)
                 cn = cl.node(lr)
                 cs = cl.read(cl.node(lr))
 
                 l.insert(0, {"parity": parity,
                              "filenode": hex(n),
                              "filerev": i,
                              "file": f,
                              "node": hex(cn),
                              "author": cs[1],
                              "date": cs[2],
                              "rename": self.renamelink(fl, n),
                              "parent": self.siblings(fl.parents(n),
                                                      fl.rev, file=f),
                              "child": self.siblings(fl.children(n),
                                                     fl.rev, file=f),
                              "desc": cs[4]})
                 parity = 1 - parity
 
             for e in l:
                 yield e
 
         yield self.t("filelog", file=f, filenode=filenode, entries=entries)
 
     def filerevision(self, f, node):
         fl = self.repo.file(f)
         n = fl.lookup(node)
         node = hex(n)
         text = fl.read(n)
         changerev = fl.linkrev(n)
         cl = self.repo.changelog
         cn = cl.node(changerev)
         cs = cl.read(cn)
         mfn = cs[0]
 
         mt = mimetypes.guess_type(f)[0]
         rawtext = text
         if util.binary(text):
             mt = mt or 'application/octet-stream'
             text = "(binary:%s)" % mt
         mt = mt or 'text/plain'
 
         def lines():
             for l, t in enumerate(text.splitlines(1)):
                 yield {"line": t,
                        "linenumber": "% 6d" % (l + 1),
                        "parity": self.stripes(l)}
 
         yield self.t("filerevision",
                      file=f,
                      filenode=node,
                      path=_up(f),
                      text=lines(),
                      raw=rawtext,
                      mimetype=mt,
                      rev=changerev,
                      node=hex(cn),
                      manifest=hex(mfn),
                      author=cs[1],
                      date=cs[2],
                      parent=self.siblings(fl.parents(n), fl.rev, file=f),
                      child=self.siblings(fl.children(n), fl.rev, file=f),
                      rename=self.renamelink(fl, n),
                      permissions=self.repo.manifest.read(mfn).execf(f))
 
     def fileannotate(self, f, node):
         bcache = {}
         ncache = {}
         fl = self.repo.file(f)
         n = fl.lookup(node)
         node = hex(n)
         changerev = fl.linkrev(n)
 
         cl = self.repo.changelog
         cn = cl.node(changerev)
         cs = cl.read(cn)
         mfn = cs[0]
 
         def annotate(**map):
             parity = 0
             last = None
             for r, l in fl.annotate(n):
                 try:
                     cnode = ncache[r]
                 except KeyError:
                     cnode = ncache[r] = self.repo.changelog.node(r)
 
                 try:
                     name = bcache[r]
                 except KeyError:
                     cl = self.repo.changelog.read(cnode)
                     bcache[r] = name = self.repo.ui.shortuser(cl[1])
 
                 if last != cnode:
                     parity = 1 - parity
                     last = cnode
 
                 yield {"parity": parity,
                        "node": hex(cnode),
                        "rev": r,
                        "author": name,
                        "file": f,
                        "line": l}
 
         yield self.t("fileannotate",
                      file=f,
                      filenode=node,
                      annotate=annotate,
                      path=_up(f),
                      rev=changerev,
                      node=hex(cn),
                      manifest=hex(mfn),
                      author=cs[1],
                      date=cs[2],
                      rename=self.renamelink(fl, n),
                      parent=self.siblings(fl.parents(n), fl.rev, file=f),
                      child=self.siblings(fl.children(n), fl.rev, file=f),
                      permissions=self.repo.manifest.read(mfn).execf(f))
 
     def manifest(self, mnode, path):
         man = self.repo.manifest
         mn = man.lookup(mnode)
         mnode = hex(mn)
         mf = man.read(mn)
         rev = man.rev(mn)
         changerev = man.linkrev(mn)
         node = self.repo.changelog.node(changerev)
 
         files = {}
 
         p = path[1:]
         if p and p[-1] != "/":
             p += "/"
         l = len(p)
 
         for f,n in mf.items():
             if f[:l] != p:
                 continue
             remain = f[l:]
             if "/" in remain:
                 short = remain[:remain.index("/") + 1] # bleah
                 files[short] = (f, None)
             else:
                 short = os.path.basename(remain)
                 files[short] = (f, n)
 
         def filelist(**map):
             parity = 0
             fl = files.keys()
             fl.sort()
             for f in fl:
                 full, fnode = files[f]
                 if not fnode:
                     continue
 
                 yield {"file": full,
                        "manifest": mnode,
                        "filenode": hex(fnode),
                        "parity": self.stripes(parity),
                        "basename": f,
                        "permissions": mf.execf(full)}
                 parity += 1
 
         def dirlist(**map):
             parity = 0
             fl = files.keys()
             fl.sort()
             for f in fl:
                 full, fnode = files[f]
                 if fnode:
                     continue
 
                 yield {"parity": self.stripes(parity),
                        "path": os.path.join(path, f),
                        "manifest": mnode,
                        "basename": f[:-1]}
                 parity += 1
 
         yield self.t("manifest",
                      manifest=mnode,
                      rev=rev,
                      node=hex(node),
                      path=path,
                      up=_up(path),
                      fentries=filelist,
                      dentries=dirlist,
                      archives=self.archivelist(hex(node)))
 
     def tags(self):
         cl = self.repo.changelog
         mf = cl.read(cl.tip())[0]
 
         i = self.repo.tagslist()
         i.reverse()
 
         def entries(notip=False, **map):
             parity = 0
             for k,n in i:
                 if notip and k == "tip": continue
                 yield {"parity": self.stripes(parity),
                        "tag": k,
                        "tagmanifest": hex(cl.read(n)[0]),
                        "date": cl.read(n)[2],
                        "node": hex(n)}
                 parity += 1
 
         yield self.t("tags",
                      manifest=hex(mf),
                      entries=lambda **x: entries(False, **x),
                      entriesnotip=lambda **x: entries(True, **x))
 
     def summary(self):
         cl = self.repo.changelog
         mf = cl.read(cl.tip())[0]
 
         i = self.repo.tagslist()
         i.reverse()
 
         def tagentries(**map):
             parity = 0
             count = 0
             for k,n in i:
                 if k == "tip": # skip tip
                     continue;
 
                 count += 1
                 if count > 10: # limit to 10 tags
                     break;
 
                 c = cl.read(n)
                 m = c[0]
                 t = c[2]
 
                 yield self.t("tagentry",
                              parity = self.stripes(parity),
                              tag = k,
                              node = hex(n),
                              date = t,
                              tagmanifest = hex(m))
                 parity += 1
 
         def changelist(**map):
             parity = 0
             cl = self.repo.changelog
             l = [] # build a list in forward order for efficiency
             for i in range(start, end):
                 n = cl.node(i)
                 changes = cl.read(n)
                 hn = hex(n)
                 t = changes[2]
 
                 l.insert(0, self.t(
                     'shortlogentry',
                     parity = parity,
                     author = changes[1],
                     manifest = hex(changes[0]),
                     desc = changes[4],
                     date = t,
                     rev = i,
                     node = hn))
                 parity = 1 - parity
 
             yield l
 
         cl = self.repo.changelog
         mf = cl.read(cl.tip())[0]
         count = cl.count()
         start = max(0, count - self.maxchanges)
         end = min(count, start + self.maxchanges)
 
         yield self.t("summary",
                      desc = self.repo.ui.config("web", "description", "unknown"),
                      owner = (self.repo.ui.config("ui", "username") or # preferred
                               self.repo.ui.config("web", "contact") or # deprecated
                               self.repo.ui.config("web", "author", "unknown")), # also
                      lastchange = (0, 0), # FIXME
                      manifest = hex(mf),
                      tags = tagentries,
                      shortlog = changelist,
                      archives=self.archivelist("tip"))
 
     def filediff(self, file, changeset):
         cl = self.repo.changelog
         n = self.repo.lookup(changeset)
         changeset = hex(n)
         p1 = cl.parents(n)[0]
         cs = cl.read(n)
         mf = self.repo.manifest.read(cs[0])
 
         def diff(**map):
             yield self.diff(p1, n, [file])
 
         yield self.t("filediff",
                      file=file,
                      filenode=hex(mf.get(file, nullid)),
                      node=changeset,
                      rev=self.repo.changelog.rev(n),
                      parent=self.siblings(cl.parents(n), cl.rev),
                      child=self.siblings(cl.children(n), cl.rev),
                      diff=diff)
 
     archive_specs = {
         'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
         'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
         'zip': ('application/zip', 'zip', '.zip', None),
         }
 
     def archive(self, req, cnode, type_):
         reponame = re.sub(r"\W+", "-", os.path.basename(self.reponame))
         name = "%s-%s" % (reponame, short(cnode))
         mimetype, artype, extension, encoding = self.archive_specs[type_]
         headers = [('Content-type', mimetype),
                    ('Content-disposition', 'attachment; filename=%s%s' %
                     (name, extension))]
         if encoding:
             headers.append(('Content-encoding', encoding))
         req.header(headers)
         archival.archive(self.repo, req.out, cnode, artype, prefix=name)
 
     # add tags to things
     # tags -> list of changesets corresponding to tags
     # find tag, changeset, file
 
     def cleanpath(self, path):
         p = util.normpath(path)
         if p[:2] == "..":
             raise Exception("suspicious path")
         return p
 
     def run(self):
         if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
             raise RuntimeError("This function is only intended to be called while running as a CGI script.")
         import mercurial.hgweb.wsgicgi as wsgicgi
         from request import wsgiapplication
         def make_web_app():
             return self
         wsgicgi.launch(wsgiapplication(make_web_app))
 
     def run_wsgi(self, req):
         def header(**map):
             header_file = cStringIO.StringIO(''.join(self.t("header", **map)))
             msg = mimetools.Message(header_file, 0)
             req.header(msg.items())
             yield header_file.read()
 
         def rawfileheader(**map):
             req.header([('Content-type', map['mimetype']),
                         ('Content-disposition', 'filename=%s' % map['file']),
                         ('Content-length', str(len(map['raw'])))])
             yield ''
 
         def footer(**map):
             yield self.t("footer",
                          motd=self.repo.ui.config("web", "motd", ""),
                          **map)
 
         def expand_form(form):
             shortcuts = {
                 'cl': [('cmd', ['changelog']), ('rev', None)],
                 'sl': [('cmd', ['shortlog']), ('rev', None)],
                 'cs': [('cmd', ['changeset']), ('node', None)],
                 'f': [('cmd', ['file']), ('filenode', None)],
                 'fl': [('cmd', ['filelog']), ('filenode', None)],
                 'fd': [('cmd', ['filediff']), ('node', None)],
                 'fa': [('cmd', ['annotate']), ('filenode', None)],
                 'mf': [('cmd', ['manifest']), ('manifest', None)],
                 'ca': [('cmd', ['archive']), ('node', None)],
                 'tags': [('cmd', ['tags'])],
                 'tip': [('cmd', ['changeset']), ('node', ['tip'])],
                 'static': [('cmd', ['static']), ('file', None)]
                 }
 
             for k in shortcuts.iterkeys():
                 if form.has_key(k):
                     for name, value in shortcuts[k]:
                         if value is None:
                             value = form[k]
                         form[name] = value
                     del form[k]
 
         self.refresh()
 
         expand_form(req.form)
 
         m = os.path.join(self.templatepath, "map")
         style = self.repo.ui.config("web", "style", "")
         if req.form.has_key('style'):
             style = req.form['style'][0]
         if style:
             b = os.path.basename("map-" + style)
             p = os.path.join(self.templatepath, b)
             if os.path.isfile(p):
                 m = p
 
         port = req.env["SERVER_PORT"]
         port = port != "80" and (":" + port) or ""
         uri = req.env["REQUEST_URI"]
         if "?" in uri:
             uri = uri.split("?")[0]
         url = "http://%s%s%s" % (req.env["SERVER_NAME"], port, uri)
         if not self.reponame:
             self.reponame = (self.repo.ui.config("web", "name")
                              or uri.strip('/') or self.repo.root)
 
         self.t = templater.templater(m, templater.common_filters,
                                      defaults={"url": url,
                                                "repo": self.reponame,
                                                "header": header,
                                                "footer": footer,
                                                "rawfileheader": rawfileheader,
                                                })
 
         if not req.form.has_key('cmd'):
             req.form['cmd'] = [self.t.cache['default'],]
 
         cmd = req.form['cmd'][0]
 
         method = getattr(self, 'do_' + cmd, None)
         if method:
             method(req)
         else:
             req.write(self.t("error"))
 
     def stripes(self, parity):
         "make horizontal stripes for easier reading"
         if self.stripecount:
             return (1 + parity / self.stripecount) & 1
         else:
             return 0
 
     def do_changelog(self, req):
         hi = self.repo.changelog.count() - 1
         if req.form.has_key('rev'):
             hi = req.form['rev'][0]
             try:
                 hi = self.repo.changelog.rev(self.repo.lookup(hi))
             except hg.RepoError:
                 req.write(self.search(hi)) # XXX redirect to 404 page?
                 return
 
         req.write(self.changelog(hi))
 
     def do_shortlog(self, req):
         hi = self.repo.changelog.count() - 1
         if req.form.has_key('rev'):
             hi = req.form['rev'][0]
             try:
                 hi = self.repo.changelog.rev(self.repo.lookup(hi))
             except hg.RepoError:
                 req.write(self.search(hi)) # XXX redirect to 404 page?
                 return
 
         req.write(self.changelog(hi, shortlog = True))
 
     def do_changeset(self, req):
         req.write(self.changeset(req.form['node'][0]))
 
     def do_manifest(self, req):
         req.write(self.manifest(req.form['manifest'][0],
                                 self.cleanpath(req.form['path'][0])))
 
     def do_tags(self, req):
         req.write(self.tags())
 
     def do_summary(self, req):
         req.write(self.summary())
 
     def do_filediff(self, req):
         req.write(self.filediff(self.cleanpath(req.form['file'][0]),
                                 req.form['node'][0]))
 
     def do_file(self, req):
         req.write(self.filerevision(self.cleanpath(req.form['file'][0]),
                                     req.form['filenode'][0]))
 
     def do_annotate(self, req):
         req.write(self.fileannotate(self.cleanpath(req.form['file'][0]),
                                     req.form['filenode'][0]))
 
     def do_filelog(self, req):
         req.write(self.filelog(self.cleanpath(req.form['file'][0]),
                                req.form['filenode'][0]))
 
     def do_heads(self, req):
         resp = " ".join(map(hex, self.repo.heads())) + "\n"
         req.httphdr("application/mercurial-0.1", length=len(resp))
         req.write(resp)
 
     def do_branches(self, req):
         nodes = []
         if req.form.has_key('nodes'):
             nodes = map(bin, req.form['nodes'][0].split(" "))
         resp = cStringIO.StringIO()
         for b in self.repo.branches(nodes):
             resp.write(" ".join(map(hex, b)) + "\n")
         resp = resp.getvalue()
         req.httphdr("application/mercurial-0.1", length=len(resp))
         req.write(resp)
 
     def do_between(self, req):
-        nodes = []
         if req.form.has_key('pairs'):
             pairs = [map(bin, p.split("-"))
                      for p in req.form['pairs'][0].split(" ")]
         resp = cStringIO.StringIO()
         for b in self.repo.between(pairs):
             resp.write(" ".join(map(hex, b)) + "\n")
         resp = resp.getvalue()
         req.httphdr("application/mercurial-0.1", length=len(resp))
         req.write(resp)
 
     def do_changegroup(self, req):
         req.httphdr("application/mercurial-0.1")
         nodes = []
         if not self.allowpull:
             return
 
         if req.form.has_key('roots'):
             nodes = map(bin, req.form['roots'][0].split(" "))
 
         z = zlib.compressobj()
         f = self.repo.changegroup(nodes, 'serve')
         while 1:
             chunk = f.read(4096)
             if not chunk:
                 break
             req.write(z.compress(chunk))
 
         req.write(z.flush())
 
     def do_archive(self, req):
         changeset = self.repo.lookup(req.form['node'][0])
         type_ = req.form['type'][0]
         allowed = self.repo.ui.configlist("web", "allow_archive")
         if (type_ in self.archives and (type_ in allowed or
             self.repo.ui.configbool("web", "allow" + type_, False))):
             self.archive(req, changeset, type_)
             return
 
         req.write(self.t("error"))
 
     def do_static(self, req):
         fname = req.form['file'][0]
         static = self.repo.ui.config("web", "static",
                                      os.path.join(self.templatepath,
                                                   "static"))
         req.write(staticfile(static, fname, req)
                   or self.t("error", error="%r not found" % fname))
 
     def do_capabilities(self, req):
         caps = ['unbundle']
         if self.repo.ui.configbool('server', 'uncompressed'):
             caps.append('stream=%d' % self.repo.revlogversion)
         resp = ' '.join(caps)
         req.httphdr("application/mercurial-0.1", length=len(resp))
         req.write(resp)
 
     def check_perm(self, req, op, default):
         '''check permission for operation based on user auth.
         return true if op allowed, else false.
         default is policy to use if no config given.'''
 
         user = req.env.get('REMOTE_USER')
 
         deny = self.repo.ui.configlist('web', 'deny_' + op)
         if deny and (not user or deny == ['*'] or user in deny):
             return False
 
         allow = self.repo.ui.configlist('web', 'allow_' + op)
         return (allow and (allow == ['*'] or user in allow)) or default
 
     def do_unbundle(self, req):
         def bail(response, headers={}):
             length = int(req.env['CONTENT_LENGTH'])
             for s in util.filechunkiter(req, limit=length):
                 # drain incoming bundle, else client will not see
                 # response when run outside cgi script
                 pass
             req.httphdr("application/mercurial-0.1", headers=headers)
             req.write('0\n')
             req.write(response)
 
         # require ssl by default, auth info cannot be sniffed and
         # replayed
         ssl_req = self.repo.ui.configbool('web', 'push_ssl', True)
         if ssl_req:
             if not req.env.get('HTTPS'):
                 bail(_('ssl required\n'))
                 return
             proto = 'https'
         else:
             proto = 'http'
 
         # do not allow push unless explicitly allowed
         if not self.check_perm(req, 'push', False):
             bail(_('push not authorized\n'),
                  headers={'status': '401 Unauthorized'})
             return
 
         req.httphdr("application/mercurial-0.1")
 
         their_heads = req.form['heads'][0].split(' ')
 
         def check_heads():
             heads = map(hex, self.repo.heads())
             return their_heads == [hex('force')] or their_heads == heads
 
         # fail early if possible
         if not check_heads():
             bail(_('unsynced changes\n'))
             return
 
         # do not lock repo until all changegroup data is
         # streamed. save to temporary file.
 
         fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
         fp = os.fdopen(fd, 'wb+')
         try:
             length = int(req.env['CONTENT_LENGTH'])
             for s in util.filechunkiter(req, limit=length):
                 fp.write(s)
 
             lock = self.repo.lock()
             try:
                 if not check_heads():
                     req.write('0\n')
                     req.write(_('unsynced changes\n'))
                     return
 
                 fp.seek(0)
 
                 # send addchangegroup output to client
 
                 old_stdout = sys.stdout
                 sys.stdout = cStringIO.StringIO()
 
                 try:
                     url = 'remote:%s:%s' % (proto,
                                             req.env.get('REMOTE_HOST', ''))
                     ret = self.repo.addchangegroup(fp, 'serve', url)
                 finally:
                     val = sys.stdout.getvalue()
                     sys.stdout = old_stdout
                 req.write('%d\n' % ret)
                 req.write(val)
             finally:
                 lock.release()
         finally:
             fp.close()
             os.unlink(tempname)
 
     def do_stream_out(self, req):
         req.httphdr("application/mercurial-0.1")
         streamclone.stream_out(self.repo, req)
--- a/mercurial/httprepo.py
+++ b/mercurial/httprepo.py
@@ -1,352 +1,351 @@
1 # httprepo.py - HTTP repository proxy classes for mercurial
1 # httprepo.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 from node import *
9 from node import *
10 from remoterepo import *
10 from remoterepo import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "hg os urllib urllib2 urlparse zlib util httplib")
13 demandload(globals(), "hg os urllib urllib2 urlparse zlib util httplib")
14 demandload(globals(), "errno keepalive tempfile socket")
14 demandload(globals(), "errno keepalive tempfile socket")
15
15
16 class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
16 class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
17 def __init__(self, ui):
17 def __init__(self, ui):
18 urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
18 urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
19 self.ui = ui
19 self.ui = ui
20
20
21 def find_user_password(self, realm, authuri):
21 def find_user_password(self, realm, authuri):
22 authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
22 authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
23 self, realm, authuri)
23 self, realm, authuri)
24 user, passwd = authinfo
24 user, passwd = authinfo
25 if user and passwd:
25 if user and passwd:
26 return (user, passwd)
26 return (user, passwd)
27
27
28 if not self.ui.interactive:
28 if not self.ui.interactive:
29 raise util.Abort(_('http authorization required'))
29 raise util.Abort(_('http authorization required'))
30
30
31 self.ui.write(_("http authorization required\n"))
31 self.ui.write(_("http authorization required\n"))
32 self.ui.status(_("realm: %s\n") % realm)
32 self.ui.status(_("realm: %s\n") % realm)
33 if user:
33 if user:
34 self.ui.status(_("user: %s\n") % user)
34 self.ui.status(_("user: %s\n") % user)
35 else:
35 else:
36 user = self.ui.prompt(_("user:"), default=None)
36 user = self.ui.prompt(_("user:"), default=None)
37
37
38 if not passwd:
38 if not passwd:
39 passwd = self.ui.getpass()
39 passwd = self.ui.getpass()
40
40
41 self.add_password(realm, authuri, user, passwd)
41 self.add_password(realm, authuri, user, passwd)
42 return (user, passwd)
42 return (user, passwd)
43
43
44 def netlocsplit(netloc):
44 def netlocsplit(netloc):
45 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
45 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
46
46
47 a = netloc.find('@')
47 a = netloc.find('@')
48 if a == -1:
48 if a == -1:
49 user, passwd = None, None
49 user, passwd = None, None
50 else:
50 else:
51 userpass, netloc = netloc[:a], netloc[a+1:]
51 userpass, netloc = netloc[:a], netloc[a+1:]
52 c = userpass.find(':')
52 c = userpass.find(':')
53 if c == -1:
53 if c == -1:
54 user, passwd = urllib.unquote(userpass), None
54 user, passwd = urllib.unquote(userpass), None
55 else:
55 else:
56 user = urllib.unquote(userpass[:c])
56 user = urllib.unquote(userpass[:c])
57 passwd = urllib.unquote(userpass[c+1:])
57 passwd = urllib.unquote(userpass[c+1:])
58 c = netloc.find(':')
58 c = netloc.find(':')
59 if c == -1:
59 if c == -1:
60 host, port = netloc, None
60 host, port = netloc, None
61 else:
61 else:
62 host, port = netloc[:c], netloc[c+1:]
62 host, port = netloc[:c], netloc[c+1:]
63 return host, port, user, passwd
63 return host, port, user, passwd
64
64
65 def netlocunsplit(host, port, user=None, passwd=None):
65 def netlocunsplit(host, port, user=None, passwd=None):
66 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
66 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
67 if port:
67 if port:
68 hostport = host + ':' + port
68 hostport = host + ':' + port
69 else:
69 else:
70 hostport = host
70 hostport = host
71 if user:
71 if user:
72 if passwd:
72 if passwd:
73 userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
73 userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
74 else:
74 else:
75 userpass = urllib.quote(user)
75 userpass = urllib.quote(user)
76 return userpass + '@' + hostport
76 return userpass + '@' + hostport
77 return hostport
77 return hostport
78
78
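For illustration, the two helpers above round-trip a well-formed netloc; a minimal sketch with hypothetical credentials:

    # sketch, hypothetical credentials: netlocsplit/netlocunsplit round-trip
    host, port, user, passwd = netlocsplit('bob:secret@example.com:8000')
    # -> ('example.com', '8000', 'bob', 'secret')
    assert netlocunsplit(host, port, user, passwd) == 'bob:secret@example.com:8000'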
79 class httpconnection(keepalive.HTTPConnection):
79 class httpconnection(keepalive.HTTPConnection):
80 # must be able to send big bundle as stream.
80 # must be able to send big bundle as stream.
81
81
82 def send(self, data):
82 def send(self, data):
83 if isinstance(data, str):
83 if isinstance(data, str):
84 keepalive.HTTPConnection.send(self, data)
84 keepalive.HTTPConnection.send(self, data)
85 else:
85 else:
86 # if auth required, some data sent twice, so rewind here
86 # if auth required, some data sent twice, so rewind here
87 data.seek(0)
87 data.seek(0)
88 for chunk in util.filechunkiter(data):
88 for chunk in util.filechunkiter(data):
89 keepalive.HTTPConnection.send(self, chunk)
89 keepalive.HTTPConnection.send(self, chunk)
90
90
91 class basehttphandler(keepalive.HTTPHandler):
91 class basehttphandler(keepalive.HTTPHandler):
92 def http_open(self, req):
92 def http_open(self, req):
93 return self.do_open(httpconnection, req)
93 return self.do_open(httpconnection, req)
94
94
95 has_https = hasattr(urllib2, 'HTTPSHandler')
95 has_https = hasattr(urllib2, 'HTTPSHandler')
96 if has_https:
96 if has_https:
97 class httpsconnection(httplib.HTTPSConnection):
97 class httpsconnection(httplib.HTTPSConnection):
98 response_class = keepalive.HTTPResponse
98 response_class = keepalive.HTTPResponse
99 # must be able to send big bundle as stream.
99 # must be able to send big bundle as stream.
100
100
101 def send(self, data):
101 def send(self, data):
102 if isinstance(data, str):
102 if isinstance(data, str):
103 httplib.HTTPSConnection.send(self, data)
103 httplib.HTTPSConnection.send(self, data)
104 else:
104 else:
105 # if auth required, some data sent twice, so rewind here
105 # if auth required, some data sent twice, so rewind here
106 data.seek(0)
106 data.seek(0)
107 for chunk in util.filechunkiter(data):
107 for chunk in util.filechunkiter(data):
108 httplib.HTTPSConnection.send(self, chunk)
108 httplib.HTTPSConnection.send(self, chunk)
109
109
110 class httphandler(basehttphandler, urllib2.HTTPSHandler):
110 class httphandler(basehttphandler, urllib2.HTTPSHandler):
111 def https_open(self, req):
111 def https_open(self, req):
112 return self.do_open(httpsconnection, req)
112 return self.do_open(httpsconnection, req)
113 else:
113 else:
114 class httphandler(basehttphandler):
114 class httphandler(basehttphandler):
115 pass
115 pass
116
116
117 class httprepository(remoterepository):
117 class httprepository(remoterepository):
118 def __init__(self, ui, path):
118 def __init__(self, ui, path):
119 self.path = path
119 self.path = path
120 self.caps = None
120 self.caps = None
121 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
121 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
122 if query or frag:
122 if query or frag:
123 raise util.Abort(_('unsupported URL component: "%s"') %
123 raise util.Abort(_('unsupported URL component: "%s"') %
124 (query or frag))
124 (query or frag))
125 if not urlpath: urlpath = '/'
125 if not urlpath: urlpath = '/'
126 host, port, user, passwd = netlocsplit(netloc)
126 host, port, user, passwd = netlocsplit(netloc)
127
127
128 # urllib cannot handle URLs with embedded user or passwd
128 # urllib cannot handle URLs with embedded user or passwd
129 self._url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
129 self._url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
130 urlpath, '', ''))
130 urlpath, '', ''))
131 self.ui = ui
131 self.ui = ui
132
132
133 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
133 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
134 proxyauthinfo = None
134 # XXX proxyauthinfo = None
135 handler = httphandler()
135 handler = httphandler()
136
136
137 if proxyurl:
137 if proxyurl:
138 # proxy can be proper url or host[:port]
138 # proxy can be proper url or host[:port]
139 if not (proxyurl.startswith('http:') or
139 if not (proxyurl.startswith('http:') or
140 proxyurl.startswith('https:')):
140 proxyurl.startswith('https:')):
141 proxyurl = 'http://' + proxyurl + '/'
141 proxyurl = 'http://' + proxyurl + '/'
142 snpqf = urlparse.urlsplit(proxyurl)
142 snpqf = urlparse.urlsplit(proxyurl)
143 proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
143 proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
144 hpup = netlocsplit(proxynetloc)
144 hpup = netlocsplit(proxynetloc)
145
145
146 proxyhost, proxyport, proxyuser, proxypasswd = hpup
146 proxyhost, proxyport, proxyuser, proxypasswd = hpup
147 if not proxyuser:
147 if not proxyuser:
148 proxyuser = ui.config("http_proxy", "user")
148 proxyuser = ui.config("http_proxy", "user")
149 proxypasswd = ui.config("http_proxy", "passwd")
149 proxypasswd = ui.config("http_proxy", "passwd")
150
150
151 # see if we should use a proxy for this url
151 # see if we should use a proxy for this url
152 no_list = [ "localhost", "127.0.0.1" ]
152 no_list = [ "localhost", "127.0.0.1" ]
153 no_list.extend([p.lower() for
153 no_list.extend([p.lower() for
154 p in ui.configlist("http_proxy", "no")])
154 p in ui.configlist("http_proxy", "no")])
155 no_list.extend([p.strip().lower() for
155 no_list.extend([p.strip().lower() for
156 p in os.getenv("no_proxy", '').split(',')
156 p in os.getenv("no_proxy", '').split(',')
157 if p.strip()])
157 if p.strip()])
158 # "http_proxy.always" config is for running tests on localhost
158 # "http_proxy.always" config is for running tests on localhost
159 if (not ui.configbool("http_proxy", "always") and
159 if (not ui.configbool("http_proxy", "always") and
160 host.lower() in no_list):
160 host.lower() in no_list):
161 ui.debug(_('disabling proxy for %s\n') % host)
161 ui.debug(_('disabling proxy for %s\n') % host)
162 else:
162 else:
163 proxyurl = urlparse.urlunsplit((
163 proxyurl = urlparse.urlunsplit((
164 proxyscheme, netlocunsplit(proxyhost, proxyport,
164 proxyscheme, netlocunsplit(proxyhost, proxyport,
165 proxyuser, proxypasswd or ''),
165 proxyuser, proxypasswd or ''),
166 proxypath, proxyquery, proxyfrag))
166 proxypath, proxyquery, proxyfrag))
167 handler = urllib2.ProxyHandler({scheme: proxyurl})
167 handler = urllib2.ProxyHandler({scheme: proxyurl})
168 ui.debug(_('proxying through %s\n') % proxyurl)
168 ui.debug(_('proxying through %s\n') % proxyurl)
169
169
170 # urllib2 takes proxy values from the environment and those
170 # urllib2 takes proxy values from the environment and those
171 # will take precedence if found, so drop them
171 # will take precedence if found, so drop them
172 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
172 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
173 try:
173 try:
174 if os.environ.has_key(env):
174 if os.environ.has_key(env):
175 del os.environ[env]
175 del os.environ[env]
176 except OSError:
176 except OSError:
177 pass
177 pass
178
178
179 passmgr = passwordmgr(ui)
179 passmgr = passwordmgr(ui)
180 if user:
180 if user:
181 ui.debug(_('http auth: user %s, password %s\n') %
181 ui.debug(_('http auth: user %s, password %s\n') %
182 (user, passwd and '*' * len(passwd) or 'not set'))
182 (user, passwd and '*' * len(passwd) or 'not set'))
183 passmgr.add_password(None, host, user, passwd or '')
183 passmgr.add_password(None, host, user, passwd or '')
184
184
185 opener = urllib2.build_opener(
185 opener = urllib2.build_opener(
186 handler,
186 handler,
187 urllib2.HTTPBasicAuthHandler(passmgr),
187 urllib2.HTTPBasicAuthHandler(passmgr),
188 urllib2.HTTPDigestAuthHandler(passmgr))
188 urllib2.HTTPDigestAuthHandler(passmgr))
189
189
190 # 1.0 here is the _protocol_ version
190 # 1.0 here is the _protocol_ version
191 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
191 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
192 urllib2.install_opener(opener)
192 urllib2.install_opener(opener)
193
193
194 def url(self):
194 def url(self):
195 return self.path
195 return self.path
196
196
197 # look up capabilities only when needed
197 # look up capabilities only when needed
198
198
199 def get_caps(self):
199 def get_caps(self):
200 if self.caps is None:
200 if self.caps is None:
201 try:
201 try:
202 self.caps = self.do_read('capabilities').split()
202 self.caps = self.do_read('capabilities').split()
203 except hg.RepoError:
203 except hg.RepoError:
204 self.caps = ()
204 self.caps = ()
205 self.ui.debug(_('capabilities: %s\n') %
205 self.ui.debug(_('capabilities: %s\n') %
206 (' '.join(self.caps or ['none'])))
206 (' '.join(self.caps or ['none'])))
207 return self.caps
207 return self.caps
208
208
209 capabilities = property(get_caps)
209 capabilities = property(get_caps)
210
210
211 def lock(self):
211 def lock(self):
212 raise util.Abort(_('operation not supported over http'))
212 raise util.Abort(_('operation not supported over http'))
213
213
214 def do_cmd(self, cmd, **args):
214 def do_cmd(self, cmd, **args):
215 data = args.pop('data', None)
215 data = args.pop('data', None)
216 headers = args.pop('headers', {})
216 headers = args.pop('headers', {})
217 self.ui.debug(_("sending %s command\n") % cmd)
217 self.ui.debug(_("sending %s command\n") % cmd)
218 q = {"cmd": cmd}
218 q = {"cmd": cmd}
219 q.update(args)
219 q.update(args)
220 qs = urllib.urlencode(q)
220 qs = urllib.urlencode(q)
221 cu = "%s?%s" % (self._url, qs)
221 cu = "%s?%s" % (self._url, qs)
222 try:
222 try:
223 resp = urllib2.urlopen(urllib2.Request(cu, data, headers))
223 resp = urllib2.urlopen(urllib2.Request(cu, data, headers))
224 except urllib2.HTTPError, inst:
224 except urllib2.HTTPError, inst:
225 if inst.code == 401:
225 if inst.code == 401:
226 raise util.Abort(_('authorization failed'))
226 raise util.Abort(_('authorization failed'))
227 raise
227 raise
228 except httplib.HTTPException, inst:
228 except httplib.HTTPException, inst:
229 self.ui.debug(_('http error while sending %s command\n') % cmd)
229 self.ui.debug(_('http error while sending %s command\n') % cmd)
230 self.ui.print_exc()
230 self.ui.print_exc()
231 raise IOError(None, inst)
231 raise IOError(None, inst)
232 try:
232 try:
233 proto = resp.getheader('content-type')
233 proto = resp.getheader('content-type')
234 except AttributeError:
234 except AttributeError:
235 proto = resp.headers['content-type']
235 proto = resp.headers['content-type']
236
236
237 # accept old "text/plain" and "application/hg-changegroup" for now
237 # accept old "text/plain" and "application/hg-changegroup" for now
238 if not proto.startswith('application/mercurial') and \
238 if not proto.startswith('application/mercurial') and \
239 not proto.startswith('text/plain') and \
239 not proto.startswith('text/plain') and \
240 not proto.startswith('application/hg-changegroup'):
240 not proto.startswith('application/hg-changegroup'):
241 raise hg.RepoError(_("'%s' does not appear to be an hg repository") %
241 raise hg.RepoError(_("'%s' does not appear to be an hg repository") %
242 self._url)
242 self._url)
243
243
244 if proto.startswith('application/mercurial'):
244 if proto.startswith('application/mercurial'):
245 version = proto[22:]
245 version = proto[22:]
246 if float(version) > 0.1:
246 if float(version) > 0.1:
247 raise hg.RepoError(_("'%s' uses newer protocol %s") %
247 raise hg.RepoError(_("'%s' uses newer protocol %s") %
248 (self._url, version))
248 (self._url, version))
249
249
250 return resp
250 return resp
251
251
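Every command thus travels as query parameters appended to the repository URL; a small sketch of the query-string step (the 'heads' command takes no arguments):

    # sketch: do_cmd turns {'cmd': 'heads'} into 'cmd=heads', giving
    # a request URL of the form http://host/repo?cmd=heads
    import urllib
    assert urllib.urlencode({'cmd': 'heads'}) == 'cmd=heads'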
252 def do_read(self, cmd, **args):
252 def do_read(self, cmd, **args):
253 fp = self.do_cmd(cmd, **args)
253 fp = self.do_cmd(cmd, **args)
254 try:
254 try:
255 return fp.read()
255 return fp.read()
256 finally:
256 finally:
257 # if using keepalive, allow connection to be reused
257 # if using keepalive, allow connection to be reused
258 fp.close()
258 fp.close()
259
259
260 def heads(self):
260 def heads(self):
261 d = self.do_read("heads")
261 d = self.do_read("heads")
262 try:
262 try:
263 return map(bin, d[:-1].split(" "))
263 return map(bin, d[:-1].split(" "))
264 except:
264 except:
265 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
265 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
266 raise
266 raise
267
267
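The response body parsed above is plain text: space-separated 40-character hex node ids plus a trailing newline. A sketch of that decode with synthetic ids, using bin() from the node module:

    # sketch, synthetic node ids: decoding a 'heads' response body
    d = ('a' * 40) + ' ' + ('b' * 40) + '\n'
    heads = map(bin, d[:-1].split(' '))  # two 20-byte binary node ids
    assert len(heads) == 2 and len(heads[0]) == 20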
268 def branches(self, nodes):
268 def branches(self, nodes):
269 n = " ".join(map(hex, nodes))
269 n = " ".join(map(hex, nodes))
270 d = self.do_read("branches", nodes=n)
270 d = self.do_read("branches", nodes=n)
271 try:
271 try:
272 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
272 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
273 return br
273 return br
274 except:
274 except:
275 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
275 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
276 raise
276 raise
277
277
278 def between(self, pairs):
278 def between(self, pairs):
279 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
279 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
280 d = self.do_read("between", pairs=n)
280 d = self.do_read("between", pairs=n)
281 try:
281 try:
282 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
282 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
283 return p
283 return p
284 except:
284 except:
285 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
285 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
286 raise
286 raise
287
287
288 def changegroup(self, nodes, kind):
288 def changegroup(self, nodes, kind):
289 n = " ".join(map(hex, nodes))
289 n = " ".join(map(hex, nodes))
290 f = self.do_cmd("changegroup", roots=n)
290 f = self.do_cmd("changegroup", roots=n)
291 bytes = 0
292
291
293 def zgenerator(f):
292 def zgenerator(f):
294 zd = zlib.decompressobj()
293 zd = zlib.decompressobj()
295 try:
294 try:
296 for chnk in f:
295 for chnk in f:
297 yield zd.decompress(chnk)
296 yield zd.decompress(chnk)
298 except httplib.HTTPException, inst:
297 except httplib.HTTPException:
299 raise IOError(None, _('connection ended unexpectedly'))
298 raise IOError(None, _('connection ended unexpectedly'))
300 yield zd.flush()
299 yield zd.flush()
301
300
302 return util.chunkbuffer(zgenerator(util.filechunkiter(f)))
301 return util.chunkbuffer(zgenerator(util.filechunkiter(f)))
303
302
304 def unbundle(self, cg, heads, source):
303 def unbundle(self, cg, heads, source):
305 # have to stream bundle to a temp file because we do not have
304 # have to stream bundle to a temp file because we do not have
306 # http 1.1 chunked transfer.
305 # http 1.1 chunked transfer.
307
306
308 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
307 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
309 fp = os.fdopen(fd, 'wb+')
308 fp = os.fdopen(fd, 'wb+')
310 try:
309 try:
311 for chunk in util.filechunkiter(cg):
310 for chunk in util.filechunkiter(cg):
312 fp.write(chunk)
311 fp.write(chunk)
313 length = fp.tell()
312 length = fp.tell()
314 try:
313 try:
315 rfp = self.do_cmd(
314 rfp = self.do_cmd(
316 'unbundle', data=fp,
315 'unbundle', data=fp,
317 headers={'content-length': length,
316 headers={'content-length': length,
318 'content-type': 'application/octet-stream'},
317 'content-type': 'application/octet-stream'},
319 heads=' '.join(map(hex, heads)))
318 heads=' '.join(map(hex, heads)))
320 try:
319 try:
321 ret = int(rfp.readline())
320 ret = int(rfp.readline())
322 self.ui.write(rfp.read())
321 self.ui.write(rfp.read())
323 return ret
322 return ret
324 finally:
323 finally:
325 rfp.close()
324 rfp.close()
326 except socket.error, err:
325 except socket.error, err:
327 if err[0] in (errno.ECONNRESET, errno.EPIPE):
326 if err[0] in (errno.ECONNRESET, errno.EPIPE):
328 raise util.Abort(_('push failed: %s') % err[1])
327 raise util.Abort(_('push failed: %s') % err[1])
329 raise util.Abort(err[1])
328 raise util.Abort(err[1])
330 finally:
329 finally:
331 fp.close()
330 fp.close()
332 os.unlink(tempname)
331 os.unlink(tempname)
333
332
334 def stream_out(self):
333 def stream_out(self):
335 return self.do_cmd('stream_out')
334 return self.do_cmd('stream_out')
336
335
337 class httpsrepository(httprepository):
336 class httpsrepository(httprepository):
338 def __init__(self, ui, path):
337 def __init__(self, ui, path):
339 if not has_https:
338 if not has_https:
340 raise util.Abort(_('Python support for SSL and HTTPS '
339 raise util.Abort(_('Python support for SSL and HTTPS '
341 'is not installed'))
340 'is not installed'))
342 httprepository.__init__(self, ui, path)
341 httprepository.__init__(self, ui, path)
343
342
344 def instance(ui, path, create):
343 def instance(ui, path, create):
345 if create:
344 if create:
346 raise util.Abort(_('cannot create new http repository'))
345 raise util.Abort(_('cannot create new http repository'))
347 if path.startswith('hg:'):
346 if path.startswith('hg:'):
348 ui.warn(_("hg:// syntax is deprecated, please use http:// instead\n"))
347 ui.warn(_("hg:// syntax is deprecated, please use http:// instead\n"))
349 path = 'http:' + path[3:]
348 path = 'http:' + path[3:]
350 if path.startswith('https:'):
349 if path.startswith('https:'):
351 return httpsrepository(ui, path)
350 return httpsrepository(ui, path)
352 return httprepository(ui, path)
351 return httprepository(ui, path)
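As a usage sketch (the URL is hypothetical, and the ui object comes from mercurial.ui):

    # sketch: instance() dispatches on scheme; create=True aborts for http
    from mercurial import ui
    repo = instance(ui.ui(), 'http://example.com/repo', False)
    # an 'https://...' path would return an httpsrepository instead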
@@ -1,1303 +1,1302 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import demandload
15 from demandload import demandload
16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
17 demandload(globals(), "sha struct util zlib")
17 demandload(globals(), "sha struct util zlib")
18
18
19 # revlog version strings
19 # revlog version strings
20 REVLOGV0 = 0
20 REVLOGV0 = 0
21 REVLOGNG = 1
21 REVLOGNG = 1
22
22
23 # revlog flags
23 # revlog flags
24 REVLOGNGINLINEDATA = (1 << 16)
24 REVLOGNGINLINEDATA = (1 << 16)
25 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
25 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
26
26
27 REVLOG_DEFAULT_FORMAT = REVLOGNG
27 REVLOG_DEFAULT_FORMAT = REVLOGNG
28 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
28 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
29
29
30 def flagstr(flag):
30 def flagstr(flag):
31 if flag == "inline":
31 if flag == "inline":
32 return REVLOGNGINLINEDATA
32 return REVLOGNGINLINEDATA
33 raise RevlogError(_("unknown revlog flag %s" % flag))
33 raise RevlogError(_("unknown revlog flag %s" % flag))
34
34
35 def hash(text, p1, p2):
35 def hash(text, p1, p2):
36 """generate a hash from the given text and its parent hashes
36 """generate a hash from the given text and its parent hashes
37
37
38 This hash combines both the current file contents and its history
38 This hash combines both the current file contents and its history
39 in a manner that makes it easy to distinguish nodes with the same
39 in a manner that makes it easy to distinguish nodes with the same
40 content in the revision graph.
40 content in the revision graph.
41 """
41 """
42 l = [p1, p2]
42 l = [p1, p2]
43 l.sort()
43 l.sort()
44 s = sha.new(l[0])
44 s = sha.new(l[0])
45 s.update(l[1])
45 s.update(l[1])
46 s.update(text)
46 s.update(text)
47 return s.digest()
47 return s.digest()
48
48
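Since the parent list is sorted before hashing, the resulting node id is independent of parent order; a quick sketch with synthetic 20-byte ids:

    # sketch: hash() is symmetric in its parents because l is sorted first
    p1, p2 = '\x01' * 20, '\x02' * 20
    assert hash('some text', p1, p2) == hash('some text', p2, p1)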
49 def compress(text):
49 def compress(text):
50 """ generate a possibly-compressed representation of text """
50 """ generate a possibly-compressed representation of text """
51 if not text: return ("", text)
51 if not text: return ("", text)
52 if len(text) < 44:
52 if len(text) < 44:
53 if text[0] == '\0': return ("", text)
53 if text[0] == '\0': return ("", text)
54 return ('u', text)
54 return ('u', text)
55 bin = zlib.compress(text)
55 bin = zlib.compress(text)
56 if len(bin) > len(text):
56 if len(bin) > len(text):
57 if text[0] == '\0': return ("", text)
57 if text[0] == '\0': return ("", text)
58 return ('u', text)
58 return ('u', text)
59 return ("", bin)
59 return ("", bin)
60
60
61 def decompress(bin):
61 def decompress(bin):
62 """ decompress the given input """
62 """ decompress the given input """
63 if not bin: return bin
63 if not bin: return bin
64 t = bin[0]
64 t = bin[0]
65 if t == '\0': return bin
65 if t == '\0': return bin
66 if t == 'x': return zlib.decompress(bin)
66 if t == 'x': return zlib.decompress(bin)
67 if t == 'u': return bin[1:]
67 if t == 'u': return bin[1:]
68 raise RevlogError(_("unknown compression type %r") % t)
68 raise RevlogError(_("unknown compression type %r") % t)
69
69
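Taken together, compress() and decompress() honor a simple contract: the concatenated (prefix, data) pair always decodes back to the original text, with the first byte ('\0', 'x', or 'u') selecting the decoder. A sketch:

    # sketch: round-trip contract between compress() and decompress()
    for text in ('', 'short', 'x' * 1000):
        prefix, data = compress(text)
        assert decompress(prefix + data) == text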
70 indexformatv0 = ">4l20s20s20s"
70 indexformatv0 = ">4l20s20s20s"
71 v0shaoffset = 56
71 v0shaoffset = 56
72 # index ng:
72 # index ng:
73 # 6 bytes offset
73 # 6 bytes offset
74 # 2 bytes flags
74 # 2 bytes flags
75 # 4 bytes compressed length
75 # 4 bytes compressed length
76 # 4 bytes uncompressed length
76 # 4 bytes uncompressed length
77 # 4 bytes: base rev
77 # 4 bytes: base rev
78 # 4 bytes link rev
78 # 4 bytes link rev
79 # 4 bytes parent 1 rev
79 # 4 bytes parent 1 rev
80 # 4 bytes parent 2 rev
80 # 4 bytes parent 2 rev
81 # 32 bytes: nodeid
81 # 32 bytes: nodeid
82 indexformatng = ">Qiiiiii20s12x"
82 indexformatng = ">Qiiiiii20s12x"
83 ngshaoffset = 32
83 ngshaoffset = 32
84 versionformat = ">i"
84 versionformat = ">i"
85
85
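These formats pin the record sizes: 76 bytes per v0 entry (nodeid at offset 56) and 64 bytes per revlogng entry (nodeid at offset 32), which the sha offsets above reflect. A quick check:

    # sketch: fixed index record sizes implied by the formats above
    import struct
    assert struct.calcsize(indexformatv0) == 76  # 4*4 + 3*20; sha at 16+20+20
    assert struct.calcsize(indexformatng) == 64  # 8 + 6*4 + 20 + 12x pad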
86 class lazyparser(object):
86 class lazyparser(object):
87 """
87 """
88 this class avoids the need to parse the entirety of large indices
88 this class avoids the need to parse the entirety of large indices
89 """
89 """
90
90
91 # lazyparser is not safe to use on windows if win32 extensions not
91 # lazyparser is not safe to use on windows if win32 extensions not
92 # available. it keeps the file handle open, which makes it impossible
92 # available. it keeps the file handle open, which makes it impossible
93 # to break hardlinks on local cloned repos.
93 # to break hardlinks on local cloned repos.
94 safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
94 safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
95 hasattr(util, 'win32api'))
95 hasattr(util, 'win32api'))
96
96
97 def __init__(self, dataf, size, indexformat, shaoffset):
97 def __init__(self, dataf, size, indexformat, shaoffset):
98 self.dataf = dataf
98 self.dataf = dataf
99 self.format = indexformat
99 self.format = indexformat
100 self.s = struct.calcsize(indexformat)
100 self.s = struct.calcsize(indexformat)
101 self.indexformat = indexformat
101 self.indexformat = indexformat
102 self.datasize = size
102 self.datasize = size
103 self.l = size/self.s
103 self.l = size/self.s
104 self.index = [None] * self.l
104 self.index = [None] * self.l
105 self.map = {nullid: -1}
105 self.map = {nullid: -1}
106 self.allmap = 0
106 self.allmap = 0
107 self.all = 0
107 self.all = 0
108 self.mapfind_count = 0
108 self.mapfind_count = 0
109 self.shaoffset = shaoffset
109 self.shaoffset = shaoffset
110
110
111 def loadmap(self):
111 def loadmap(self):
112 """
112 """
113 during a commit, we need to make sure the rev being added is
113 during a commit, we need to make sure the rev being added is
114 not a duplicate. This requires loading the entire index,
114 not a duplicate. This requires loading the entire index,
115 which is fairly slow. loadmap can load up just the node map,
115 which is fairly slow. loadmap can load up just the node map,
116 which takes much less time.
116 which takes much less time.
117 """
117 """
118 if self.allmap: return
118 if self.allmap: return
119 start = 0
120 end = self.datasize
119 end = self.datasize
121 self.allmap = 1
120 self.allmap = 1
122 cur = 0
121 cur = 0
123 count = 0
122 count = 0
124 blocksize = self.s * 256
123 blocksize = self.s * 256
125 self.dataf.seek(0)
124 self.dataf.seek(0)
126 while cur < end:
125 while cur < end:
127 data = self.dataf.read(blocksize)
126 data = self.dataf.read(blocksize)
128 off = 0
127 off = 0
129 for x in xrange(256):
128 for x in xrange(256):
130 n = data[off + self.shaoffset:off + self.shaoffset + 20]
129 n = data[off + self.shaoffset:off + self.shaoffset + 20]
131 self.map[n] = count
130 self.map[n] = count
132 count += 1
131 count += 1
133 if count >= self.l:
132 if count >= self.l:
134 break
133 break
135 off += self.s
134 off += self.s
136 cur += blocksize
135 cur += blocksize
137
136
138 def loadblock(self, blockstart, blocksize, data=None):
137 def loadblock(self, blockstart, blocksize, data=None):
139 if self.all: return
138 if self.all: return
140 if data is None:
139 if data is None:
141 self.dataf.seek(blockstart)
140 self.dataf.seek(blockstart)
142 if blockstart + blocksize > self.datasize:
141 if blockstart + blocksize > self.datasize:
143 # the revlog may have grown since we've started running,
142 # the revlog may have grown since we've started running,
144 # but we don't have space in self.index for more entries.
143 # but we don't have space in self.index for more entries.
145 # limit blocksize so that we don't get too much data.
144 # limit blocksize so that we don't get too much data.
146 blocksize = max(self.datasize - blockstart, 0)
145 blocksize = max(self.datasize - blockstart, 0)
147 data = self.dataf.read(blocksize)
146 data = self.dataf.read(blocksize)
148 lend = len(data) / self.s
147 lend = len(data) / self.s
149 i = blockstart / self.s
148 i = blockstart / self.s
150 off = 0
149 off = 0
151 for x in xrange(lend):
150 for x in xrange(lend):
152 if self.index[i + x] == None:
151 if self.index[i + x] == None:
153 b = data[off : off + self.s]
152 b = data[off : off + self.s]
154 self.index[i + x] = b
153 self.index[i + x] = b
155 n = b[self.shaoffset:self.shaoffset + 20]
154 n = b[self.shaoffset:self.shaoffset + 20]
156 self.map[n] = i + x
155 self.map[n] = i + x
157 off += self.s
156 off += self.s
158
157
159 def findnode(self, node):
158 def findnode(self, node):
160 """search backwards through the index file for a specific node"""
159 """search backwards through the index file for a specific node"""
161 if self.allmap: return None
160 if self.allmap: return None
162
161
163 # hg log will cause many many searches for the manifest
162 # hg log will cause many many searches for the manifest
164 # nodes. After we get called a few times, just load the whole
163 # nodes. After we get called a few times, just load the whole
165 # thing.
164 # thing.
166 if self.mapfind_count > 8:
165 if self.mapfind_count > 8:
167 self.loadmap()
166 self.loadmap()
168 if node in self.map:
167 if node in self.map:
169 return node
168 return node
170 return None
169 return None
171 self.mapfind_count += 1
170 self.mapfind_count += 1
172 last = self.l - 1
171 last = self.l - 1
173 while self.index[last] != None:
172 while self.index[last] != None:
174 if last == 0:
173 if last == 0:
175 self.all = 1
174 self.all = 1
176 self.allmap = 1
175 self.allmap = 1
177 return None
176 return None
178 last -= 1
177 last -= 1
179 end = (last + 1) * self.s
178 end = (last + 1) * self.s
180 blocksize = self.s * 256
179 blocksize = self.s * 256
181 while end >= 0:
180 while end >= 0:
182 start = max(end - blocksize, 0)
181 start = max(end - blocksize, 0)
183 self.dataf.seek(start)
182 self.dataf.seek(start)
184 data = self.dataf.read(end - start)
183 data = self.dataf.read(end - start)
185 findend = end - start
184 findend = end - start
186 while True:
185 while True:
187 # we're searching backwards, so we have to make sure
186 # we're searching backwards, so we have to make sure
188 # we don't find a changeset where this node is a parent
187 # we don't find a changeset where this node is a parent
189 off = data.rfind(node, 0, findend)
188 off = data.rfind(node, 0, findend)
190 findend = off
189 findend = off
191 if off >= 0:
190 if off >= 0:
192 i = off / self.s
191 i = off / self.s
193 off = i * self.s
192 off = i * self.s
194 n = data[off + self.shaoffset:off + self.shaoffset + 20]
193 n = data[off + self.shaoffset:off + self.shaoffset + 20]
195 if n == node:
194 if n == node:
196 self.map[n] = i + start / self.s
195 self.map[n] = i + start / self.s
197 return node
196 return node
198 else:
197 else:
199 break
198 break
200 end -= blocksize
199 end -= blocksize
201 return None
200 return None
202
201
203 def loadindex(self, i=None, end=None):
202 def loadindex(self, i=None, end=None):
204 if self.all: return
203 if self.all: return
205 all = False
204 all = False
206 if i == None:
205 if i == None:
207 blockstart = 0
206 blockstart = 0
208 blocksize = (512 / self.s) * self.s
207 blocksize = (512 / self.s) * self.s
209 end = self.datasize
208 end = self.datasize
210 all = True
209 all = True
211 else:
210 else:
212 if end:
211 if end:
213 blockstart = i * self.s
212 blockstart = i * self.s
214 end = end * self.s
213 end = end * self.s
215 blocksize = end - blockstart
214 blocksize = end - blockstart
216 else:
215 else:
217 blockstart = (i & ~(32)) * self.s
216 blockstart = (i & ~(32)) * self.s
218 blocksize = self.s * 64
217 blocksize = self.s * 64
219 end = blockstart + blocksize
218 end = blockstart + blocksize
220 while blockstart < end:
219 while blockstart < end:
221 self.loadblock(blockstart, blocksize)
220 self.loadblock(blockstart, blocksize)
222 blockstart += blocksize
221 blockstart += blocksize
223 if all: self.all = True
222 if all: self.all = True
224
223
225 class lazyindex(object):
224 class lazyindex(object):
226 """a lazy version of the index array"""
225 """a lazy version of the index array"""
227 def __init__(self, parser):
226 def __init__(self, parser):
228 self.p = parser
227 self.p = parser
229 def __len__(self):
228 def __len__(self):
230 return len(self.p.index)
229 return len(self.p.index)
231 def load(self, pos):
230 def load(self, pos):
232 if pos < 0:
231 if pos < 0:
233 pos += len(self.p.index)
232 pos += len(self.p.index)
234 self.p.loadindex(pos)
233 self.p.loadindex(pos)
235 return self.p.index[pos]
234 return self.p.index[pos]
236 def __getitem__(self, pos):
235 def __getitem__(self, pos):
237 ret = self.p.index[pos] or self.load(pos)
236 ret = self.p.index[pos] or self.load(pos)
238 if isinstance(ret, str):
237 if isinstance(ret, str):
239 ret = struct.unpack(self.p.indexformat, ret)
238 ret = struct.unpack(self.p.indexformat, ret)
240 return ret
239 return ret
241 def __setitem__(self, pos, item):
240 def __setitem__(self, pos, item):
242 self.p.index[pos] = item
241 self.p.index[pos] = item
243 def __delitem__(self, pos):
242 def __delitem__(self, pos):
244 del self.p.index[pos]
243 del self.p.index[pos]
245 def append(self, e):
244 def append(self, e):
246 self.p.index.append(e)
245 self.p.index.append(e)
247
246
248 class lazymap(object):
247 class lazymap(object):
249 """a lazy version of the node map"""
248 """a lazy version of the node map"""
250 def __init__(self, parser):
249 def __init__(self, parser):
251 self.p = parser
250 self.p = parser
252 def load(self, key):
251 def load(self, key):
253 n = self.p.findnode(key)
252 n = self.p.findnode(key)
254 if n == None:
253 if n == None:
255 raise KeyError(key)
254 raise KeyError(key)
256 def __contains__(self, key):
255 def __contains__(self, key):
257 if key in self.p.map:
256 if key in self.p.map:
258 return True
257 return True
259 self.p.loadmap()
258 self.p.loadmap()
260 return key in self.p.map
259 return key in self.p.map
261 def __iter__(self):
260 def __iter__(self):
262 yield nullid
261 yield nullid
263 for i in xrange(self.p.l):
262 for i in xrange(self.p.l):
264 ret = self.p.index[i]
263 ret = self.p.index[i]
265 if not ret:
264 if not ret:
266 self.p.loadindex(i)
265 self.p.loadindex(i)
267 ret = self.p.index[i]
266 ret = self.p.index[i]
268 if isinstance(ret, str):
267 if isinstance(ret, str):
269 ret = struct.unpack(self.p.indexformat, ret)
268 ret = struct.unpack(self.p.indexformat, ret)
270 yield ret[-1]
269 yield ret[-1]
271 def __getitem__(self, key):
270 def __getitem__(self, key):
272 try:
271 try:
273 return self.p.map[key]
272 return self.p.map[key]
274 except KeyError:
273 except KeyError:
275 try:
274 try:
276 self.load(key)
275 self.load(key)
277 return self.p.map[key]
276 return self.p.map[key]
278 except KeyError:
277 except KeyError:
279 raise KeyError("node " + hex(key))
278 raise KeyError("node " + hex(key))
280 def __setitem__(self, key, val):
279 def __setitem__(self, key, val):
281 self.p.map[key] = val
280 self.p.map[key] = val
282 def __delitem__(self, key):
281 def __delitem__(self, key):
283 del self.p.map[key]
282 del self.p.map[key]
284
283
285 class RevlogError(Exception): pass
284 class RevlogError(Exception): pass
286
285
287 class revlog(object):
286 class revlog(object):
288 """
287 """
289 the underlying revision storage object
288 the underlying revision storage object
290
289
291 A revlog consists of two parts, an index and the revision data.
290 A revlog consists of two parts, an index and the revision data.
292
291
293 The index is a file with a fixed record size containing
292 The index is a file with a fixed record size containing
294 information on each revision, including its nodeid (hash), the
293 information on each revision, including its nodeid (hash), the
295 nodeids of its parents, the position and offset of its data within
294 nodeids of its parents, the position and offset of its data within
296 the data file, and the revision it's based on. Finally, each entry
295 the data file, and the revision it's based on. Finally, each entry
297 contains a linkrev entry that can serve as a pointer to external
296 contains a linkrev entry that can serve as a pointer to external
298 data.
297 data.
299
298
300 The revision data itself is a linear collection of data chunks.
299 The revision data itself is a linear collection of data chunks.
301 Each chunk represents a revision and is usually represented as a
300 Each chunk represents a revision and is usually represented as a
302 delta against the previous chunk. To bound lookup time, runs of
301 delta against the previous chunk. To bound lookup time, runs of
303 deltas are limited to about 2 times the length of the original
302 deltas are limited to about 2 times the length of the original
304 version data. This makes retrieval of a version proportional to
303 version data. This makes retrieval of a version proportional to
305 its size, or O(1) relative to the number of revisions.
304 its size, or O(1) relative to the number of revisions.
306
305
307 Both pieces of the revlog are written to in an append-only
306 Both pieces of the revlog are written to in an append-only
308 fashion, which means we never need to rewrite a file to insert or
307 fashion, which means we never need to rewrite a file to insert or
309 remove data, and can use some simple techniques to avoid the need
308 remove data, and can use some simple techniques to avoid the need
310 for locking while reading.
309 for locking while reading.
311 """
310 """
312 def __init__(self, opener, indexfile, datafile,
311 def __init__(self, opener, indexfile, datafile,
313 defversion=REVLOG_DEFAULT_VERSION):
312 defversion=REVLOG_DEFAULT_VERSION):
314 """
313 """
315 create a revlog object
314 create a revlog object
316
315
317 opener is a function that abstracts the file opening operation
316 opener is a function that abstracts the file opening operation
318 and can be used to implement COW semantics or the like.
317 and can be used to implement COW semantics or the like.
319 """
318 """
320 self.indexfile = indexfile
319 self.indexfile = indexfile
321 self.datafile = datafile
320 self.datafile = datafile
322 self.opener = opener
321 self.opener = opener
323
322
324 self.indexstat = None
323 self.indexstat = None
325 self.cache = None
324 self.cache = None
326 self.chunkcache = None
325 self.chunkcache = None
327 self.defversion = defversion
326 self.defversion = defversion
328 self.load()
327 self.load()
329
328
330 def load(self):
329 def load(self):
331 v = self.defversion
330 v = self.defversion
332 try:
331 try:
333 f = self.opener(self.indexfile)
332 f = self.opener(self.indexfile)
334 i = f.read(4)
333 i = f.read(4)
335 f.seek(0)
334 f.seek(0)
336 except IOError, inst:
335 except IOError, inst:
337 if inst.errno != errno.ENOENT:
336 if inst.errno != errno.ENOENT:
338 raise
337 raise
339 i = ""
338 i = ""
340 else:
339 else:
341 try:
340 try:
342 st = util.fstat(f)
341 st = util.fstat(f)
343 except AttributeError, inst:
342 except AttributeError, inst:
344 st = None
343 st = None
345 else:
344 else:
346 oldst = self.indexstat
345 oldst = self.indexstat
347 if (oldst and st.st_dev == oldst.st_dev
346 if (oldst and st.st_dev == oldst.st_dev
348 and st.st_ino == oldst.st_ino
347 and st.st_ino == oldst.st_ino
349 and st.st_mtime == oldst.st_mtime
348 and st.st_mtime == oldst.st_mtime
350 and st.st_ctime == oldst.st_ctime):
349 and st.st_ctime == oldst.st_ctime):
351 return
350 return
352 self.indexstat = st
351 self.indexstat = st
353 if len(i) > 0:
352 if len(i) > 0:
354 v = struct.unpack(versionformat, i)[0]
353 v = struct.unpack(versionformat, i)[0]
355 flags = v & ~0xFFFF
354 flags = v & ~0xFFFF
356 fmt = v & 0xFFFF
355 fmt = v & 0xFFFF
357 if fmt == REVLOGV0:
356 if fmt == REVLOGV0:
358 if flags:
357 if flags:
359 raise RevlogError(_("index %s invalid flags %x for format v0" %
358 raise RevlogError(_("index %s invalid flags %x for format v0" %
360 (self.indexfile, flags)))
359 (self.indexfile, flags)))
361 elif fmt == REVLOGNG:
360 elif fmt == REVLOGNG:
362 if flags & ~REVLOGNGINLINEDATA:
361 if flags & ~REVLOGNGINLINEDATA:
363 raise RevlogError(_("index %s invalid flags %x for revlogng" %
362 raise RevlogError(_("index %s invalid flags %x for revlogng" %
364 (self.indexfile, flags)))
363 (self.indexfile, flags)))
365 else:
364 else:
366 raise RevlogError(_("index %s invalid format %d" %
365 raise RevlogError(_("index %s invalid format %d" %
367 (self.indexfile, fmt)))
366 (self.indexfile, fmt)))
368 self.version = v
367 self.version = v
369 if v == REVLOGV0:
368 if v == REVLOGV0:
370 self.indexformat = indexformatv0
369 self.indexformat = indexformatv0
371 shaoffset = v0shaoffset
370 shaoffset = v0shaoffset
372 else:
371 else:
373 self.indexformat = indexformatng
372 self.indexformat = indexformatng
374 shaoffset = ngshaoffset
373 shaoffset = ngshaoffset
375
374
376 if i:
375 if i:
377 if (lazyparser.safe_to_use and not self.inlinedata() and
376 if (lazyparser.safe_to_use and not self.inlinedata() and
378 st and st.st_size > 10000):
377 st and st.st_size > 10000):
379 # big index, let's parse it on demand
378 # big index, let's parse it on demand
380 parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
379 parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
381 self.index = lazyindex(parser)
380 self.index = lazyindex(parser)
382 self.nodemap = lazymap(parser)
381 self.nodemap = lazymap(parser)
383 else:
382 else:
384 self.parseindex(f, st)
383 self.parseindex(f, st)
385 if self.version != REVLOGV0:
384 if self.version != REVLOGV0:
386 e = list(self.index[0])
385 e = list(self.index[0])
387 type = self.ngtype(e[0])
386 type = self.ngtype(e[0])
388 e[0] = self.offset_type(0, type)
387 e[0] = self.offset_type(0, type)
389 self.index[0] = e
388 self.index[0] = e
390 else:
389 else:
391 self.nodemap = { nullid: -1}
390 self.nodemap = { nullid: -1}
392 self.index = []
391 self.index = []
393
392
394
393
395 def parseindex(self, fp, st):
394 def parseindex(self, fp, st):
396 s = struct.calcsize(self.indexformat)
395 s = struct.calcsize(self.indexformat)
397 self.index = []
396 self.index = []
398 self.nodemap = {nullid: -1}
397 self.nodemap = {nullid: -1}
399 inline = self.inlinedata()
398 inline = self.inlinedata()
400 n = 0
399 n = 0
401 leftover = None
400 leftover = None
402 while True:
401 while True:
403 if st:
402 if st:
404 data = fp.read(65536)
403 data = fp.read(65536)
405 else:
404 else:
406 # hack for httprangereader, it doesn't do partial reads well
405 # hack for httprangereader, it doesn't do partial reads well
407 data = fp.read()
406 data = fp.read()
408 if not data:
407 if not data:
409 break
408 break
410 if n == 0 and self.inlinedata():
409 if n == 0 and self.inlinedata():
411 # cache the first chunk
410 # cache the first chunk
412 self.chunkcache = (0, data)
411 self.chunkcache = (0, data)
413 if leftover:
412 if leftover:
414 data = leftover + data
413 data = leftover + data
415 leftover = None
414 leftover = None
416 off = 0
415 off = 0
417 l = len(data)
416 l = len(data)
418 while off < l:
417 while off < l:
419 if l - off < s:
418 if l - off < s:
420 leftover = data[off:]
419 leftover = data[off:]
421 break
420 break
422 cur = data[off:off + s]
421 cur = data[off:off + s]
423 off += s
422 off += s
424 e = struct.unpack(self.indexformat, cur)
423 e = struct.unpack(self.indexformat, cur)
425 self.index.append(e)
424 self.index.append(e)
426 self.nodemap[e[-1]] = n
425 self.nodemap[e[-1]] = n
427 n += 1
426 n += 1
428 if inline:
427 if inline:
429 off += e[1]
428 off += e[1]
430 if off > l:
429 if off > l:
431 # some things don't seek well, just read it
430 # some things don't seek well, just read it
432 fp.read(off - l)
431 fp.read(off - l)
433 if not st:
432 if not st:
434 break
433 break
435
434
436
435
437 def ngoffset(self, q):
436 def ngoffset(self, q):
438 if q & 0xFFFF:
437 if q & 0xFFFF:
439 raise RevlogError(_('%s: incompatible revision flag %x') %
438 raise RevlogError(_('%s: incompatible revision flag %x') %
440 (self.indexfile, q))
439 (self.indexfile, q))
441 return long(q >> 16)
440 return long(q >> 16)
442
441
443 def ngtype(self, q):
442 def ngtype(self, q):
444 return int(q & 0xFFFF)
443 return int(q & 0xFFFF)
445
444
446 def offset_type(self, offset, type):
445 def offset_type(self, offset, type):
447 return long(long(offset) << 16 | type)
446 return long(long(offset) << 16 | type)
448
447
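The first revlogng index field packs the data offset and a 16-bit type into one integer; ngoffset() refuses entries whose type bits are non-zero before shifting them away. The arithmetic, sketched directly:

    # sketch of the packing done by offset_type() and read back by
    # ngtype()/ngoffset() (the latter requires the low 16 bits to be zero)
    q = (4096L << 16) | 1
    assert q >> 16 == 4096 and q & 0xFFFF == 1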
449 def loadindex(self, start, end):
448 def loadindex(self, start, end):
450 """load a block of indexes all at once from the lazy parser"""
449 """load a block of indexes all at once from the lazy parser"""
451 if isinstance(self.index, lazyindex):
450 if isinstance(self.index, lazyindex):
452 self.index.p.loadindex(start, end)
451 self.index.p.loadindex(start, end)
453
452
454 def loadindexmap(self):
453 def loadindexmap(self):
455 """loads both the map and the index from the lazy parser"""
454 """loads both the map and the index from the lazy parser"""
456 if isinstance(self.index, lazyindex):
455 if isinstance(self.index, lazyindex):
457 p = self.index.p
456 p = self.index.p
458 p.loadindex()
457 p.loadindex()
459 self.nodemap = p.map
458 self.nodemap = p.map
460
459
461 def loadmap(self):
460 def loadmap(self):
462 """loads the map from the lazy parser"""
461 """loads the map from the lazy parser"""
463 if isinstance(self.nodemap, lazymap):
462 if isinstance(self.nodemap, lazymap):
464 self.nodemap.p.loadmap()
463 self.nodemap.p.loadmap()
465 self.nodemap = self.nodemap.p.map
464 self.nodemap = self.nodemap.p.map
466
465
467 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
466 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
468 def tip(self): return self.node(len(self.index) - 1)
467 def tip(self): return self.node(len(self.index) - 1)
469 def count(self): return len(self.index)
468 def count(self): return len(self.index)
470 def node(self, rev):
469 def node(self, rev):
471 return (rev < 0) and nullid or self.index[rev][-1]
470 return (rev < 0) and nullid or self.index[rev][-1]
472 def rev(self, node):
471 def rev(self, node):
473 try:
472 try:
474 return self.nodemap[node]
473 return self.nodemap[node]
475 except KeyError:
474 except KeyError:
476 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
475 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
477 def linkrev(self, node):
476 def linkrev(self, node):
478 return (node == nullid) and -1 or self.index[self.rev(node)][-4]
477 return (node == nullid) and -1 or self.index[self.rev(node)][-4]
479 def parents(self, node):
478 def parents(self, node):
480 if node == nullid: return (nullid, nullid)
479 if node == nullid: return (nullid, nullid)
481 r = self.rev(node)
480 r = self.rev(node)
482 d = self.index[r][-3:-1]
481 d = self.index[r][-3:-1]
483 if self.version == REVLOGV0:
482 if self.version == REVLOGV0:
484 return d
483 return d
485 return [ self.node(x) for x in d ]
484 return [ self.node(x) for x in d ]
486 def parentrevs(self, rev):
485 def parentrevs(self, rev):
487 if rev == -1:
486 if rev == -1:
488 return (-1, -1)
487 return (-1, -1)
489 d = self.index[rev][-3:-1]
488 d = self.index[rev][-3:-1]
490 if self.version == REVLOGV0:
489 if self.version == REVLOGV0:
491 return [ self.rev(x) for x in d ]
490 return [ self.rev(x) for x in d ]
492 return d
491 return d
493 def start(self, rev):
492 def start(self, rev):
494 if rev < 0:
493 if rev < 0:
495 return -1
494 return -1
496 if self.version != REVLOGV0:
495 if self.version != REVLOGV0:
497 return self.ngoffset(self.index[rev][0])
496 return self.ngoffset(self.index[rev][0])
498 return self.index[rev][0]
497 return self.index[rev][0]
499
498
500 def end(self, rev): return self.start(rev) + self.length(rev)
499 def end(self, rev): return self.start(rev) + self.length(rev)
501
500
502 def size(self, rev):
501 def size(self, rev):
503 """return the length of the uncompressed text for a given revision"""
502 """return the length of the uncompressed text for a given revision"""
504 l = -1
503 l = -1
505 if self.version != REVLOGV0:
504 if self.version != REVLOGV0:
506 l = self.index[rev][2]
505 l = self.index[rev][2]
507 if l >= 0:
506 if l >= 0:
508 return l
507 return l
509
508
510 t = self.revision(self.node(rev))
509 t = self.revision(self.node(rev))
511 return len(t)
510 return len(t)
512
511
513 # alternate implementation. The advantage of this code is that it
512 # alternate implementation. The advantage of this code is that it
514 # will be faster for a single revision. But the results are not
513 # will be faster for a single revision. But the results are not
515 # cached, so finding the size of every revision will be slower.
514 # cached, so finding the size of every revision will be slower.
516 """
515 """
517 if self.cache and self.cache[1] == rev:
516 if self.cache and self.cache[1] == rev:
518 return len(self.cache[2])
517 return len(self.cache[2])
519
518
520 base = self.base(rev)
519 base = self.base(rev)
521 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
520 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
522 base = self.cache[1]
521 base = self.cache[1]
523 text = self.cache[2]
522 text = self.cache[2]
524 else:
523 else:
525 text = self.revision(self.node(base))
524 text = self.revision(self.node(base))
526
525
527 l = len(text)
526 l = len(text)
528 for x in xrange(base + 1, rev + 1):
527 for x in xrange(base + 1, rev + 1):
529 l = mdiff.patchedsize(l, self.chunk(x))
528 l = mdiff.patchedsize(l, self.chunk(x))
530 return l
529 return l
531 """
530 """
532
531
533 def length(self, rev):
532 def length(self, rev):
534 if rev < 0:
533 if rev < 0:
535 return 0
534 return 0
536 else:
535 else:
537 return self.index[rev][1]
536 return self.index[rev][1]
538 def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
537 def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
539
538
540 def reachable(self, rev, stop=None):
539 def reachable(self, rev, stop=None):
541 reachable = {}
540 reachable = {}
542 visit = [rev]
541 visit = [rev]
543 reachable[rev] = 1
542 reachable[rev] = 1
544 if stop:
543 if stop:
545 stopn = self.rev(stop)
544 stopn = self.rev(stop)
546 else:
545 else:
547 stopn = 0
546 stopn = 0
548 while visit:
547 while visit:
549 n = visit.pop(0)
548 n = visit.pop(0)
550 if n == stop:
549 if n == stop:
551 continue
550 continue
552 if n == nullid:
551 if n == nullid:
553 continue
552 continue
554 for p in self.parents(n):
553 for p in self.parents(n):
555 if self.rev(p) < stopn:
554 if self.rev(p) < stopn:
556 continue
555 continue
557 if p not in reachable:
556 if p not in reachable:
558 reachable[p] = 1
557 reachable[p] = 1
559 visit.append(p)
558 visit.append(p)
560 return reachable
559 return reachable
561
560
562 def nodesbetween(self, roots=None, heads=None):
561 def nodesbetween(self, roots=None, heads=None):
563 """Return a tuple containing three elements. Elements 1 and 2 contain
562 """Return a tuple containing three elements. Elements 1 and 2 contain
564 a final list of bases and heads after all the unreachable ones have been
563 a final list of bases and heads after all the unreachable ones have been
565 pruned. Element 0 contains a topologically sorted list of all
564 pruned. Element 0 contains a topologically sorted list of all
566
565
567 nodes that satisfy these constraints:
566 nodes that satisfy these constraints:
568 1. All nodes must be descended from a node in roots (the nodes on
567 1. All nodes must be descended from a node in roots (the nodes on
569 roots are considered descended from themselves).
568 roots are considered descended from themselves).
570 2. All nodes must also be ancestors of a node in heads (the nodes in
569 2. All nodes must also be ancestors of a node in heads (the nodes in
571 heads are considered to be their own ancestors).
570 heads are considered to be their own ancestors).
572
571
573 If roots is unspecified, nullid is assumed as the only root.
572 If roots is unspecified, nullid is assumed as the only root.
574 If heads is unspecified, it is taken to be the output of the
573 If heads is unspecified, it is taken to be the output of the
575 heads method (i.e. a list of all nodes in the repository that
574 heads method (i.e. a list of all nodes in the repository that
576 have no children)."""
575 have no children)."""
577 nonodes = ([], [], [])
576 nonodes = ([], [], [])
578 if roots is not None:
577 if roots is not None:
579 roots = list(roots)
578 roots = list(roots)
580 if not roots:
579 if not roots:
581 return nonodes
580 return nonodes
582 lowestrev = min([self.rev(n) for n in roots])
581 lowestrev = min([self.rev(n) for n in roots])
583 else:
582 else:
584 roots = [nullid] # Everybody's a descendent of nullid
583 roots = [nullid] # Everybody's a descendent of nullid
585 lowestrev = -1
584 lowestrev = -1
586 if (lowestrev == -1) and (heads is None):
585 if (lowestrev == -1) and (heads is None):
587 # We want _all_ the nodes!
586 # We want _all_ the nodes!
588 return ([self.node(r) for r in xrange(0, self.count())],
587 return ([self.node(r) for r in xrange(0, self.count())],
589 [nullid], list(self.heads()))
588 [nullid], list(self.heads()))
590 if heads is None:
589 if heads is None:
591 # All nodes are ancestors, so the latest ancestor is the last
590 # All nodes are ancestors, so the latest ancestor is the last
592 # node.
591 # node.
593 highestrev = self.count() - 1
592 highestrev = self.count() - 1
594 # Set ancestors to None to signal that every node is an ancestor.
593 # Set ancestors to None to signal that every node is an ancestor.
595 ancestors = None
594 ancestors = None
596 # Set heads to an empty dictionary for later discovery of heads
595 # Set heads to an empty dictionary for later discovery of heads
597 heads = {}
596 heads = {}
598 else:
597 else:
599 heads = list(heads)
598 heads = list(heads)
600 if not heads:
599 if not heads:
601 return nonodes
600 return nonodes
602 ancestors = {}
601 ancestors = {}
603 # Start at the top and keep marking parents until we're done.
        # Start at the top and keep marking parents until we're done.
        nodestotag = heads[:]
        # Turn heads into a dictionary so we can remove 'fake' heads.
        # Also, later we will be using it to filter out the heads we can't
        # find from roots.
        heads = dict.fromkeys(heads, 0)
        # Remember where the top was so we can use it as a limit later.
        highestrev = max([self.rev(n) for n in nodestotag])
        while nodestotag:
            # grab a node to tag
            n = nodestotag.pop()
            # Never tag nullid
            if n == nullid:
                continue
            # A node's revision number represents its place in a
            # topologically sorted list of nodes.
            r = self.rev(n)
            if r >= lowestrev:
                if n not in ancestors:
                    # If we are possibly a descendent of one of the roots
                    # and we haven't already been marked as an ancestor
                    ancestors[n] = 1 # Mark as ancestor
                    # Add non-nullid parents to list of nodes to tag.
                    nodestotag.extend([p for p in self.parents(n) if
                                       p != nullid])
                elif n in heads: # We've seen it before, is it a fake head?
                    # So it is, real heads should not be the ancestors of
                    # any other heads.
                    heads.pop(n)
        if not ancestors:
            return nonodes
        # Now that we have our set of ancestors, we want to remove any
        # roots that are not ancestors.

        # If one of the roots was nullid, everything is included anyway.
        if lowestrev > -1:
            # But, since we weren't, let's recompute the lowest rev to not
            # include roots that aren't ancestors.

            # Filter out roots that aren't ancestors of heads
            roots = [n for n in roots if n in ancestors]
            # Recompute the lowest revision
            if roots:
                lowestrev = min([self.rev(n) for n in roots])
            else:
                # No more roots?  Return the empty list.
                return nonodes
        else:
            # We are descending from nullid, and don't need to care about
            # any other roots.
            lowestrev = -1
            roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents.  (We seeded the descendents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

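    # Illustrative sketch (assumed names, not code from this changeset):
    # how the triple returned above by the enclosing method (nodesbetween()
    # in Mercurial's revlog) is typically consumed; `rl', `rootlist' and
    # `headlist' are hypothetical.
    #
    #   orderedout, roots, heads = rl.nodesbetween(rootlist, headlist)
    #   # orderedout: topologically sorted nodes between roots and heads
    #   # roots:  the subset of rootlist that really are roots of the set
    #   # heads:  the subset of headlist reachable from those roots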
    def heads(self, start=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned

        """
        if start is None:
            start = nullid
        startrev = self.rev(start)
        reachable = {startrev: 1}
        heads = {startrev: 1}

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, self.count()):
            for p in parentrevs(r):
                if p in reachable:
                    reachable[r] = 1
                    heads[r] = 1
                if p in heads:
                    del heads[p]
        return [self.node(r) for r in heads]

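    # Illustrative sketch (assumption: `rl' is any revlog instance).
    # heads() makes a single forward pass over revisions >= startrev,
    # so it costs O(revisions):
    #
    #   tips = rl.heads()            # every node without children
    #   sub = rl.heads(start=node)   # only heads descended from `node'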
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        if type(id) == type(0):
            return self.node(id)
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
            if len(c) == 1: return c[0]

        # might need fixing if we change hash lengths
        if len(id) == 20 and id in self.nodemap:
            return id

        raise RevlogError(_("No match found"))

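    # Illustrative sketch of the lookup forms accepted above (the values
    # are made-up examples, not from this changeset):
    #
    #   rl.lookup(5)         # revision number -> node
    #   rl.lookup("-1")      # decimal string; negative counts from tip
    #   rl.lookup("1f0e")    # unambiguous hex prefix of a node id
    #   rl.lookup(binnode)   # a full 20-byte binary id passes through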
    def cmp(self, node, text):
        """compare text with a given file revision"""
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def makenode(self, node, text):
        """calculate a file nodeid for text, descended or possibly
        unchanged from node"""

        if self.cmp(node, text):
            return hash(text, node, nullid)
        return node

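    # Editor's note: a node id is the SHA-1 over the two parent ids
    # (sorted) followed by the text, so cmp() can detect a change
    # without reconstructing the stored revision.  Hedged sketch:
    #
    #   if not rl.cmp(node, newtext):   # hash matches -> text unchanged
    #       pass                        # no new revision is needed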
    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

    def chunk(self, rev, df=None, cachelen=4096):
        start, length = self.start(rev), self.length(rev)
        inline = self.inlinedata()
        if inline:
            start += (rev + 1) * struct.calcsize(self.indexformat)
        end = start + length
        def loadcache(df):
            cache_length = max(cachelen, length) # 4k
            if not df:
                if inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache(df)

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            loadcache(df)
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])

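    # Editor's note: for inline revlogs the data chunks are interleaved
    # with the index entries, so the physical offset of revision `rev'
    # is its logical data offset plus (rev + 1) index-entry sizes --
    # entries 0..rev all precede chunk `rev' in the file, which is what
    # the `start += (rev + 1) * calcsize' adjustment above accounts for.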
    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        b1 = self.base(rev1)
        b2 = self.base(rev2)
        if b1 == b2 and rev1 + 1 == rev2:
            return self.chunk(rev2)
        else:
            return self.diff(self.revision(self.node(rev1)),
                             self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        if self.inlinedata():
            # we probably have the whole chunk cached
            df = None
        else:
            df = self.opener(self.datafile)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
            self.loadindex(base, rev + 1)
        else:
            self.loadindex(base, rev + 1)
            text = self.chunk(base, df=df)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r, df=df))

        text = self.patches(text, bins)

        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

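    # Editor's sketch of the reconstruction above (pseudocode in a
    # comment, no new API assumed): a revision is rebuilt by taking the
    # last full snapshot (its "base") and patching every delta up to
    # the wanted rev --
    #
    #   text = chunk(base)
    #   for r in base+1 .. rev:
    #       text = patches(text, [chunk(r)])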
    def checkinlinesize(self, tr, fp=None):
        if not self.inlinedata():
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
        fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo == None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        calc = struct.calcsize(self.indexformat)
        for r in xrange(self.count()):
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
        fp.close()
        df.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        if self.count():
            x = self.index[0]
            e = struct.pack(self.indexformat, *x)[4:]
            l = struct.pack(versionformat, self.version)
            fp.write(l)
            fp.write(e)

        for i in xrange(1, self.count()):
            x = self.index[i]
            e = struct.pack(self.indexformat, *x)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self.chunkcache = None

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        if self.version == REVLOGV0:
            e = (offset, l, base, link, p1, p2, node)
        else:
            e = (self.offset_type(offset, 0), l, len(text),
                 base, link, self.rev(p1), self.rev(p2), node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(self.indexformat, *e)

        if not self.inlinedata():
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, n * len(entry))
            f = self.opener(self.datafile, "a")
            if data[0]:
                f.write(data[0])
            f.write(data[1])
            f.close()
            f = self.opener(self.indexfile, "a")
        else:
            f = self.opener(self.indexfile, "a+")
            f.seek(0, 2)
            transaction.add(self.indexfile, f.tell(), self.count() - 1)

        if len(self.index) == 1 and self.version != REVLOGV0:
            l = struct.pack(versionformat, self.version)
            f.write(l)
            entry = entry[4:]

        f.write(entry)

        if self.inlinedata():
            f.write(data[0])
            f.write(data[1])
            self.checkinlinesize(transaction, f)

        self.cache = (node, n, text)
        return node

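    # Editor's note: the `dist > len(text) * 2' test above bounds the
    # delta chain -- once the bytes between the base snapshot and the
    # new entry exceed twice the uncompressed size, a fresh full
    # version is stored instead of another delta, which keeps the cost
    # of revision() reads roughly proportional to the text size.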
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # start with some short cuts for the linear cases
        if a == b:
            return a
        ra = self.rev(a)
        rb = self.rev(b)
        if ra < rb:
            last = b
            first = a
        else:
            last = a
            first = b

        # reachable won't include stop in the list, so we have to use a parent
        reachable = self.reachable(last, stop=self.parents(first)[0])
        if first in reachable:
            return first

        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        def generations(node):
            sg, s = None, {}
            for g, n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n: 1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [n for n in gx[1] if n in gy[1]]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()

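    # Editor's sketch of the generation walk above: both iterators
    # yield (distance-from-root, {nodes}) groups in decreasing
    # distance, and the two sides are advanced in lockstep; the first
    # group at equal distance whose node sets intersect contains the
    # least common ancestor.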
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes.  The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets.  The parent is parents(nodes[0])[0].
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            d = self.revdiff(a, b)
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            yield changegroup.genchunk("%s%s" % (meta, d))

        yield changegroup.closechunk()

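    # Editor's note: each chunk emitted above carries an 80-byte header
    # -- node, p1, p2 and the linked changeset id, 20 bytes each --
    # followed by the delta, which is exactly what addgroup() below
    # unpacks with struct.unpack("20s20s20s20s", chunk[:80]).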
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = -1
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        ifh.seek(0, 2)
        transaction.add(self.indexfile, ifh.tell(), self.count())
        if self.inlinedata():
            dfh = None
        else:
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against.  We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + len(cdelta)) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                if self.version == REVLOGV0:
                    e = (end, len(cdelta), base, link, p1, p2, node)
                else:
                    e = (self.offset_type(end, 0), len(cdelta), textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                self.index.append(e)
                self.nodemap[node] = r
                if self.inlinedata():
                    ifh.write(struct.pack(self.indexformat, *e))
                    ifh.write(cdelta)
                    self.checkinlinesize(transaction, ifh)
                    if not self.inlinedata():
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    if not dfh:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    dfh.write(cdelta)
                    ifh.write(struct.pack(self.indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        return node

    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        if isinstance(self.index, lazyindex):
            self.loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][-4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        if not self.inlinedata():
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * struct.calcsize(self.indexformat)
        else:
            end += rev * struct.calcsize(self.indexformat)

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:]

    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(self.indexformat)
            i = actual / s
            di = actual - (i * s)
            if self.inlinedata():
                databytes = 0
                for r in xrange(self.count()):
                    databytes += self.length(r)
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

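    # Illustrative sketch (assumption: `rl' is a revlog): checksize()
    # reports bytes the index cannot account for --
    #
    #   dd, di = rl.checksize()
    #   # dd != 0: data file is longer/shorter than the index expects
    #   # di != 0: index has trailing bytes (not a whole number of
    #   #          entries, or stray bytes in an inline revlog)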
@@ -1,997 +1,998 b''
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7
7
8 This software may be used and distributed according to the terms
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
9 of the GNU General Public License, incorporated herein by reference.
10
10
11 This contains helper routines that are independent of the SCM core and hide
11 This contains helper routines that are independent of the SCM core and hide
12 platform-specific details from the core.
12 platform-specific details from the core.
13 """
13 """
14
14
15 from i18n import gettext as _
15 from i18n import gettext as _
16 from demandload import *
16 from demandload import *
17 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
17 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
18 demandload(globals(), "os threading time")
18 demandload(globals(), "os threading time")
19
19
20 # used by parsedate
20 # used by parsedate
21 defaultdateformats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
21 defaultdateformats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
22 '%a %b %d %H:%M:%S %Y')
22 '%a %b %d %H:%M:%S %Y')
23
23
class SignalInterrupt(Exception):
    """Exception raised on SIGTERM and SIGHUP."""

def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    (pout, pin) = popen2.popen2(cmd, -1, 'b')
    def writer():
        try:
            pin.write(s)
            pin.close()
        except IOError, inst:
            if inst.errno != errno.EPIPE:
                raise

    # we should use select instead on UNIX, but this will work on most
    # systems, including Windows
    w = threading.Thread(target=writer)
    w.start()
    f = pout.read()
    pout.close()
    w.join()
    return f

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if code: raise Abort(_("command '%s' failed: %s") %
                             (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        try:
            if inname: os.unlink(inname)
        except: pass
        try:
            if outname: os.unlink(outname)
        except: pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

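# Illustrative sketch of the dispatch above (the command strings are
# made-up examples, not from this changeset):
#
#   filter(text, 'pipe: tr a-z A-Z')                  # explicit pipe filter
#   filter(text, 'tempfile: sort INFILE -o OUTFILE')  # temp-file filter
#   filter(text, 'tr a-z A-Z')                        # default: pipefilter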
def find_in_path(name, path, default=None):
    '''find name in search path. path can be string (will be split
    with os.pathsep), or iterable thing that returns strings. if name
    found, return path to name. else return default.'''
    if isinstance(path, str):
        path = path.split(os.pathsep)
    for p in path:
        p_name = os.path.join(p, name)
        if os.path.exists(p_name):
            return p_name
    return default

def binary(s):
    """return true if a string is binary data using diff's heuristic"""
    if s and '\0' in s[:4096]:
        return True
    return False

def unique(g):
    """return the unique elements of iterable g"""
    seen = {}
    for f in g:
        if f not in seen:
            seen[f] = 1
            yield f

class Abort(Exception):
    """Raised if a command needs to print an error and exit."""

def always(fn): return True
def never(fn): return False

def patkind(name, dflt_pat='glob'):
    """Split a string into an optional pattern kind prefix and the
    actual pattern."""
    for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
        if name.startswith(prefix + ':'): return name.split(':', 1)
    return dflt_pat, name

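# Illustrative sketch (the inputs are made-up examples):
#
#   patkind('re:.*\.py$')      -> ('re', '.*\.py$')
#   patkind('foo/bar')         -> ('glob', 'foo/bar')    # default kind
#   patkind('foo', 'relpath')  -> ('relpath', 'foo')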
def globre(pat, head='^', tail='$'):
    "convert a glob pattern into a regexp"
    i, n = 0, len(pat)
    res = ''
    group = False
    def peek(): return i < n and pat[i]
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            if peek() == '*':
                i += 1
                res += '.*'
            else:
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            group = True
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group = False
        elif c == ',' and group:
            res += '|'
        elif c == '\\':
            p = peek()
            if p:
                i += 1
                res += re.escape(p)
            else:
                res += re.escape(c)
        else:
            res += re.escape(c)
    return head + res + tail

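# Editor's sketch of some translations performed by globre() above:
#
#   globre('*.py')      -> '^[^/]*\.py$'       # * stops at slashes
#   globre('**/*.py')   -> '^.*/[^/]*\.py$'    # ** crosses directories
#   globre('f{oo,ee}')  -> '^f(?:oo|ee)$'      # {} becomes alternation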
_globchars = {'[': 1, '{': 1, '*': 1, '?': 1}

def pathto(n1, n2):
    '''return the relative path from one place to another.
    this returns a path in the form used by the local filesystem, not hg.'''
    if not n1: return localpath(n2)
    a, b = n1.split('/'), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b)

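# Illustrative sketch (POSIX os.sep assumed; paths are made up):
#
#   pathto('foo/bar', 'foo/baz/quux')  -> '../baz/quux'
#   pathto('', 'foo/bar')              -> 'foo/bar'   # via localpath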
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root"""
    if root == os.sep:
        rootsep = os.sep
    elif root.endswith(os.sep):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if name != rootsep and name.startswith(rootsep):
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows).  For each
        # `name', compare dev/inode numbers.  If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise Abort('%s not under root' % myname)

def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
    return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)

def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
    if os.name == 'nt':
        dflt_pat = 'glob'
    else:
        dflt_pat = 'relpath'
    return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)

247 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
247 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
248 """build a function to match a set of file patterns
248 """build a function to match a set of file patterns
249
249
250 arguments:
250 arguments:
251 canonroot - the canonical root of the tree you're matching against
251 canonroot - the canonical root of the tree you're matching against
252 cwd - the current working directory, if relevant
252 cwd - the current working directory, if relevant
253 names - patterns to find
253 names - patterns to find
254 inc - patterns to include
254 inc - patterns to include
255 exc - patterns to exclude
255 exc - patterns to exclude
256 head - a regex to prepend to patterns to control whether a match is rooted
256 head - a regex to prepend to patterns to control whether a match is rooted
257
257
258 a pattern is one of:
258 a pattern is one of:
259 'glob:<rooted glob>'
259 'glob:<rooted glob>'
260 're:<rooted regexp>'
260 're:<rooted regexp>'
261 'path:<rooted path>'
261 'path:<rooted path>'
262 'relglob:<relative glob>'
262 'relglob:<relative glob>'
263 'relpath:<relative path>'
263 'relpath:<relative path>'
264 'relre:<relative regexp>'
264 'relre:<relative regexp>'
265 '<rooted path or regexp>'
265 '<rooted path or regexp>'
266
266
267 returns:
267 returns:
268 a 3-tuple containing
268 a 3-tuple containing
269 - list of explicit non-pattern names passed in
269 - list of explicit non-pattern names passed in
270 - a bool match(filename) function
270 - a bool match(filename) function
271 - a bool indicating if any patterns were passed in
271 - a bool indicating if any patterns were passed in
272
272
273 todo:
273 todo:
274 make head regex a rooted bool
274 make head regex a rooted bool
275 """
275 """
276
276
277 def contains_glob(name):
277 def contains_glob(name):
278 for c in name:
278 for c in name:
279 if c in _globchars: return True
279 if c in _globchars: return True
280 return False
280 return False
281
281
282 def regex(kind, name, tail):
282 def regex(kind, name, tail):
283 '''convert a pattern into a regular expression'''
283 '''convert a pattern into a regular expression'''
284 if kind == 're':
284 if kind == 're':
285 return name
285 return name
286 elif kind == 'path':
286 elif kind == 'path':
287 return '^' + re.escape(name) + '(?:/|$)'
287 return '^' + re.escape(name) + '(?:/|$)'
288 elif kind == 'relglob':
288 elif kind == 'relglob':
289 return head + globre(name, '(?:|.*/)', tail)
289 return head + globre(name, '(?:|.*/)', tail)
290 elif kind == 'relpath':
290 elif kind == 'relpath':
291 return head + re.escape(name) + tail
291 return head + re.escape(name) + tail
292 elif kind == 'relre':
292 elif kind == 'relre':
293 if name.startswith('^'):
293 if name.startswith('^'):
294 return name
294 return name
295 return '.*' + name
295 return '.*' + name
296 return head + globre(name, '', tail)
296 return head + globre(name, '', tail)
297
297
298 def matchfn(pats, tail):
298 def matchfn(pats, tail):
299 """build a matching function from a set of patterns"""
299 """build a matching function from a set of patterns"""
300 if not pats:
300 if not pats:
301 return
301 return
302 matches = []
302 matches = []
303 for k, p in pats:
303 for k, p in pats:
304 try:
304 try:
305 pat = '(?:%s)' % regex(k, p, tail)
305 pat = '(?:%s)' % regex(k, p, tail)
306 matches.append(re.compile(pat).match)
306 matches.append(re.compile(pat).match)
307 except re.error:
307 except re.error:
308 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
308 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
309 else: raise Abort("invalid pattern (%s): %s" % (k, p))
309 else: raise Abort("invalid pattern (%s): %s" % (k, p))
310
310
311 def buildfn(text):
311 def buildfn(text):
312 for m in matches:
312 for m in matches:
313 r = m(text)
313 r = m(text)
314 if r:
314 if r:
315 return r
315 return r
316
316
317 return buildfn
317 return buildfn
318
318
319 def globprefix(pat):
319 def globprefix(pat):
320 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
320 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
321 root = []
321 root = []
322 for p in pat.split(os.sep):
322 for p in pat.split(os.sep):
323 if contains_glob(p): break
323 if contains_glob(p): break
324 root.append(p)
324 root.append(p)
325 return '/'.join(root)
325 return '/'.join(root)
326
326
327 pats = []
327 pats = []
328 files = []
328 files = []
329 roots = []
329 roots = []
330 for kind, name in [patkind(p, dflt_pat) for p in names]:
330 for kind, name in [patkind(p, dflt_pat) for p in names]:
331 if kind in ('glob', 'relpath'):
331 if kind in ('glob', 'relpath'):
332 name = canonpath(canonroot, cwd, name)
332 name = canonpath(canonroot, cwd, name)
333 if name == '':
333 if name == '':
334 kind, name = 'glob', '**'
334 kind, name = 'glob', '**'
335 if kind in ('glob', 'path', 're'):
335 if kind in ('glob', 'path', 're'):
336 pats.append((kind, name))
336 pats.append((kind, name))
337 if kind == 'glob':
337 if kind == 'glob':
338 root = globprefix(name)
338 root = globprefix(name)
339 if root: roots.append(root)
339 if root: roots.append(root)
340 elif kind == 'relpath':
340 elif kind == 'relpath':
341 files.append((kind, name))
341 files.append((kind, name))
342 roots.append(name)
342 roots.append(name)
343
343
344 patmatch = matchfn(pats, '$') or always
344 patmatch = matchfn(pats, '$') or always
345 filematch = matchfn(files, '(?:/|$)') or always
345 filematch = matchfn(files, '(?:/|$)') or always
346 incmatch = always
346 incmatch = always
347 if inc:
347 if inc:
348 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
348 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
349 incmatch = matchfn(inckinds, '(?:/|$)')
349 incmatch = matchfn(inckinds, '(?:/|$)')
350 excmatch = lambda fn: False
350 excmatch = lambda fn: False
351 if exc:
351 if exc:
352 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
352 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
353 excmatch = matchfn(exckinds, '(?:/|$)')
353 excmatch = matchfn(exckinds, '(?:/|$)')
354
354
355 return (roots,
355 return (roots,
356 lambda fn: (incmatch(fn) and not excmatch(fn) and
356 lambda fn: (incmatch(fn) and not excmatch(fn) and
357 (fn.endswith('/') or
357 (fn.endswith('/') or
358 (not pats and not files) or
358 (not pats and not files) or
359 (pats and patmatch(fn)) or
359 (pats and patmatch(fn)) or
360 (files and filematch(fn)))),
360 (files and filematch(fn)))),
361 (inc or exc or (pats and pats != [('glob', '**')])) and True)
361 (inc or exc or (pats and pats != [('glob', '**')])) and True)
362
362
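Callers unpack this triple as `roots, match, anypats`: `roots` limits which directories a walk must visit, `match` is the file predicate, and the final flag reports whether anything beyond a bare match-everything glob was requested. The predicate itself is just the conjunction built above; a self-contained sketch of that composition:

    import re

    # stand-ins for the closures built above, for one include/exclude-free run
    patmatch = re.compile(r'(?:^src/.*\.c$)').match   # from the name patterns
    incmatch = lambda fn: True                        # no includes given
    excmatch = lambda fn: False                       # no excludes given

    match = lambda fn: bool(incmatch(fn) and not excmatch(fn) and patmatch(fn))

    print(match('src/a.c'))    # True
    print(match('doc/x.txt'))  # False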
363 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
363 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
364 '''enhanced shell command execution.
364 '''enhanced shell command execution.
365 run with environment maybe modified, maybe in different dir.
365 run with environment maybe modified, maybe in different dir.
366
366
367 if command fails and onerr is None, return status. if ui object,
367 if command fails and onerr is None, return status. if ui object,
368 print error message and return status, else raise onerr object as
368 print error message and return status, else raise onerr object as
369 exception.'''
369 exception.'''
370 def py2shell(val):
370 def py2shell(val):
371 'convert python object into string that is useful to shell'
371 'convert python object into string that is useful to shell'
372 if val in (None, False):
372 if val in (None, False):
373 return '0'
373 return '0'
374 if val == True:
374 if val == True:
375 return '1'
375 return '1'
376 return str(val)
376 return str(val)
377 oldenv = {}
377 oldenv = {}
378 for k in environ:
378 for k in environ:
379 oldenv[k] = os.environ.get(k)
379 oldenv[k] = os.environ.get(k)
380 if cwd is not None:
380 if cwd is not None:
381 oldcwd = os.getcwd()
381 oldcwd = os.getcwd()
382 try:
382 try:
383 for k, v in environ.iteritems():
383 for k, v in environ.iteritems():
384 os.environ[k] = py2shell(v)
384 os.environ[k] = py2shell(v)
385 if cwd is not None and oldcwd != cwd:
385 if cwd is not None and oldcwd != cwd:
386 os.chdir(cwd)
386 os.chdir(cwd)
387 rc = os.system(cmd)
387 rc = os.system(cmd)
388 if rc and onerr:
388 if rc and onerr:
389 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
389 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
390 explain_exit(rc)[0])
390 explain_exit(rc)[0])
391 if errprefix:
391 if errprefix:
392 errmsg = '%s: %s' % (errprefix, errmsg)
392 errmsg = '%s: %s' % (errprefix, errmsg)
393 try:
393 try:
394 onerr.warn(errmsg + '\n')
394 onerr.warn(errmsg + '\n')
395 except AttributeError:
395 except AttributeError:
396 raise onerr(errmsg)
396 raise onerr(errmsg)
397 return rc
397 return rc
398 finally:
398 finally:
399 for k, v in oldenv.iteritems():
399 for k, v in oldenv.iteritems():
400 if v is None:
400 if v is None:
401 del os.environ[k]
401 del os.environ[k]
402 else:
402 else:
403 os.environ[k] = v
403 os.environ[k] = v
404 if cwd is not None and oldcwd != cwd:
404 if cwd is not None and oldcwd != cwd:
405 os.chdir(oldcwd)
405 os.chdir(oldcwd)
406
406
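The part worth imitating is the try/finally bookkeeping: environment and working directory are restored even if the command or the error handler raises. A minimal sketch of just that idiom (not Mercurial's API):

    import os

    def with_env(environ, fn):
        """run fn() with os.environ temporarily extended, then restore it"""
        saved = {k: os.environ.get(k) for k in environ}
        try:
            os.environ.update(environ)
            return fn()
        finally:
            for k, v in saved.items():
                if v is None:
                    del os.environ[k]
                else:
                    os.environ[k] = v

    print(with_env({'HGUSER': 'demo'}, lambda: os.environ['HGUSER']))  # demo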
407 def rename(src, dst):
407 def rename(src, dst):
408 """forcibly rename a file"""
408 """forcibly rename a file"""
409 try:
409 try:
410 os.rename(src, dst)
410 os.rename(src, dst)
411 except OSError, err:
411 except OSError, err:
412 # on windows, rename to existing file is not allowed, so we
412 # on windows, rename to existing file is not allowed, so we
413 # must delete destination first. but if file is open, unlink
413 # must delete destination first. but if file is open, unlink
414 # schedules it for delete but does not delete it. rename
414 # schedules it for delete but does not delete it. rename
415 # happens immediately even for open files, so we create
415 # happens immediately even for open files, so we create
416 # temporary file, delete it, rename destination to that name,
416 # temporary file, delete it, rename destination to that name,
417 # then delete that. then rename is safe to do.
417 # then delete that. then rename is safe to do.
418 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
418 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
419 os.close(fd)
419 os.close(fd)
420 os.unlink(temp)
420 os.unlink(temp)
421 os.rename(dst, temp)
421 os.rename(dst, temp)
422 os.unlink(temp)
422 os.unlink(temp)
423 os.rename(src, dst)
423 os.rename(src, dst)
424
424
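On POSIX, os.rename already replaces an existing destination atomically, so only Windows takes the slow path. On modern Pythons the portable equivalent is os.replace; a hedged sketch of how this helper reads today:

    import os

    def forced_rename(src, dst):
        # os.replace (Python 3.3+) performs the overwriting rename portably;
        # the temp-file dance above predates it
        os.replace(src, dst)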
425 def unlink(f):
425 def unlink(f):
426 """unlink and remove the directory if it is empty"""
426 """unlink and remove the directory if it is empty"""
427 os.unlink(f)
427 os.unlink(f)
428 # try removing directories that might now be empty
428 # try removing directories that might now be empty
429 try:
429 try:
430 os.removedirs(os.path.dirname(f))
430 os.removedirs(os.path.dirname(f))
431 except OSError:
431 except OSError:
432 pass
432 pass
433
433
434 def copyfiles(src, dst, hardlink=None):
434 def copyfiles(src, dst, hardlink=None):
435 """Copy a directory tree using hardlinks if possible"""
435 """Copy a directory tree using hardlinks if possible"""
436
436
437 if hardlink is None:
437 if hardlink is None:
438 hardlink = (os.stat(src).st_dev ==
438 hardlink = (os.stat(src).st_dev ==
439 os.stat(os.path.dirname(dst)).st_dev)
439 os.stat(os.path.dirname(dst)).st_dev)
440
440
441 if os.path.isdir(src):
441 if os.path.isdir(src):
442 os.mkdir(dst)
442 os.mkdir(dst)
443 for name in os.listdir(src):
443 for name in os.listdir(src):
444 srcname = os.path.join(src, name)
444 srcname = os.path.join(src, name)
445 dstname = os.path.join(dst, name)
445 dstname = os.path.join(dst, name)
446 copyfiles(srcname, dstname, hardlink)
446 copyfiles(srcname, dstname, hardlink)
447 else:
447 else:
448 if hardlink:
448 if hardlink:
449 try:
449 try:
450 os_link(src, dst)
450 os_link(src, dst)
451 except (IOError, OSError):
451 except (IOError, OSError):
452 hardlink = False
452 hardlink = False
453 shutil.copy(src, dst)
453 shutil.copy(src, dst)
454 else:
454 else:
455 shutil.copy(src, dst)
455 shutil.copy(src, dst)
456
456
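The hardlink probe compares st_dev so that a tree copied within one filesystem shares storage instead of duplicating it. A small demo of the per-file effect (on a platform with hardlinks; paths are temporary):

    import os, tempfile

    base = tempfile.mkdtemp()
    a = os.path.join(base, 'a')
    with open(a, 'w') as fp:
        fp.write('data')
    os.link(a, os.path.join(base, 'b'))  # what the hardlink branch does per file
    print(os.lstat(a).st_nlink)          # 2: one inode, two names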
457 def audit_path(path):
457 def audit_path(path):
458 """Abort if path contains dangerous components"""
458 """Abort if path contains dangerous components"""
459 parts = os.path.normcase(path).split(os.sep)
459 parts = os.path.normcase(path).split(os.sep)
460 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
460 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
461 or os.pardir in parts):
461 or os.pardir in parts):
462 raise Abort(_("path contains illegal component: %s\n") % path)
462 raise Abort(_("path contains illegal component: %s\n") % path)
463
463
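Concretely: a drive letter, an empty or '.hg' first component, or any '..' component is rejected. Illustrative calls against the function above (POSIX path separator assumed):

    audit_path('src/foo.c')    # passes: returns None
    audit_path('.hg/hgrc')     # raises Abort: first component is .hg
    audit_path('a/../etc')     # raises Abort: os.pardir in parts
    audit_path('/etc/passwd')  # raises Abort: first component is empty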
464 def _makelock_file(info, pathname):
464 def _makelock_file(info, pathname):
465 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
465 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
466 os.write(ld, info)
466 os.write(ld, info)
467 os.close(ld)
467 os.close(ld)
468
468
469 def _readlock_file(pathname):
469 def _readlock_file(pathname):
470 return posixfile(pathname).read()
470 return posixfile(pathname).read()
471
471
472 def nlinks(pathname):
472 def nlinks(pathname):
473 """Return number of hardlinks for the given file."""
473 """Return number of hardlinks for the given file."""
474 return os.lstat(pathname).st_nlink
474 return os.lstat(pathname).st_nlink
475
475
476 if hasattr(os, 'link'):
476 if hasattr(os, 'link'):
477 os_link = os.link
477 os_link = os.link
478 else:
478 else:
479 def os_link(src, dst):
479 def os_link(src, dst):
480 raise OSError(0, _("Hardlinks not supported"))
480 raise OSError(0, _("Hardlinks not supported"))
481
481
482 def fstat(fp):
482 def fstat(fp):
483 '''stat file object that may not have fileno method.'''
483 '''stat file object that may not have fileno method.'''
484 try:
484 try:
485 return os.fstat(fp.fileno())
485 return os.fstat(fp.fileno())
486 except AttributeError:
486 except AttributeError:
487 return os.stat(fp.name)
487 return os.stat(fp.name)
488
488
489 posixfile = file
489 posixfile = file
490
490
491 def is_win_9x():
491 def is_win_9x():
492 '''return true if run on windows 95, 98 or me.'''
492 '''return true if run on windows 95, 98 or me.'''
493 try:
493 try:
494 return sys.getwindowsversion()[3] == 1
494 return sys.getwindowsversion()[3] == 1
495 except AttributeError:
495 except AttributeError:
496 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
496 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
497
497
498 getuser_fallback = None
498 getuser_fallback = None
499
499
500 def getuser():
500 def getuser():
501 '''return name of current user'''
501 '''return name of current user'''
502 try:
502 try:
503 return getpass.getuser()
503 return getpass.getuser()
504 except ImportError:
504 except ImportError:
505 # import of pwd will fail on windows - try fallback
505 # import of pwd will fail on windows - try fallback
506 if getuser_fallback:
506 if getuser_fallback:
507 return getuser_fallback()
507 return getuser_fallback()
508 # raised if win32api not available
508 # raised if win32api not available
509 raise Abort(_('user name not available - set USERNAME '
509 raise Abort(_('user name not available - set USERNAME '
510 'environment variable'))
510 'environment variable'))
511
511
512 # Platform specific variants
512 # Platform specific variants
513 if os.name == 'nt':
513 if os.name == 'nt':
514 demandload(globals(), "msvcrt")
514 demandload(globals(), "msvcrt")
515 nulldev = 'NUL:'
515 nulldev = 'NUL:'
516
516
517 class winstdout:
517 class winstdout:
518 '''stdout on windows misbehaves if sent through a pipe'''
518 '''stdout on windows misbehaves if sent through a pipe'''
519
519
520 def __init__(self, fp):
520 def __init__(self, fp):
521 self.fp = fp
521 self.fp = fp
522
522
523 def __getattr__(self, key):
523 def __getattr__(self, key):
524 return getattr(self.fp, key)
524 return getattr(self.fp, key)
525
525
526 def close(self):
526 def close(self):
527 try:
527 try:
528 self.fp.close()
528 self.fp.close()
529 except: pass
529 except: pass
530
530
531 def write(self, s):
531 def write(self, s):
532 try:
532 try:
533 return self.fp.write(s)
533 return self.fp.write(s)
534 except IOError, inst:
534 except IOError, inst:
535 if inst.errno != 0: raise
535 if inst.errno != 0: raise
536 self.close()
536 self.close()
537 raise IOError(errno.EPIPE, 'Broken pipe')
537 raise IOError(errno.EPIPE, 'Broken pipe')
538
538
539 sys.stdout = winstdout(sys.stdout)
539 sys.stdout = winstdout(sys.stdout)
540
540
541 def system_rcpath():
541 def system_rcpath():
542 try:
542 try:
543 return system_rcpath_win32()
543 return system_rcpath_win32()
544 except:
544 except:
545 return [r'c:\mercurial\mercurial.ini']
545 return [r'c:\mercurial\mercurial.ini']
546
546
547 def os_rcpath():
547 def os_rcpath():
548 '''return default os-specific hgrc search path'''
548 '''return default os-specific hgrc search path'''
549 path = system_rcpath()
549 path = system_rcpath()
550 path.append(user_rcpath())
550 path.append(user_rcpath())
551 userprofile = os.environ.get('USERPROFILE')
551 userprofile = os.environ.get('USERPROFILE')
552 if userprofile:
552 if userprofile:
553 path.append(os.path.join(userprofile, 'mercurial.ini'))
553 path.append(os.path.join(userprofile, 'mercurial.ini'))
554 return path
554 return path
555
555
556 def user_rcpath():
556 def user_rcpath():
557 '''return os-specific hgrc search path to the user dir'''
557 '''return os-specific hgrc search path to the user dir'''
558 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
558 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
559
559
560 def parse_patch_output(output_line):
560 def parse_patch_output(output_line):
561 """parses the output produced by patch and returns the file name"""
561 """parses the output produced by patch and returns the file name"""
562 pf = output_line[14:]
562 pf = output_line[14:]
563 if pf[0] == '`':
563 if pf[0] == '`':
564 pf = pf[1:-1] # Remove the quotes
564 pf = pf[1:-1] # Remove the quotes
565 return pf
565 return pf
566
566
567 def testpid(pid):
567 def testpid(pid):
568 '''return False if pid dead, True if running or not known'''
568 '''return False if pid dead, True if running or not known'''
569 return True
569 return True
570
570
571 def is_exec(f, last):
571 def is_exec(f, last):
572 return last
572 return last
573
573
574 def set_exec(f, mode):
574 def set_exec(f, mode):
575 pass
575 pass
576
576
577 def set_binary(fd):
577 def set_binary(fd):
578 msvcrt.setmode(fd.fileno(), os.O_BINARY)
578 msvcrt.setmode(fd.fileno(), os.O_BINARY)
579
579
580 def pconvert(path):
580 def pconvert(path):
581 return path.replace("\\", "/")
581 return path.replace("\\", "/")
582
582
583 def localpath(path):
583 def localpath(path):
584 return path.replace('/', '\\')
584 return path.replace('/', '\\')
585
585
586 def normpath(path):
586 def normpath(path):
587 return pconvert(os.path.normpath(path))
587 return pconvert(os.path.normpath(path))
588
588
589 makelock = _makelock_file
589 makelock = _makelock_file
590 readlock = _readlock_file
590 readlock = _readlock_file
591
591
592 def samestat(s1, s2):
592 def samestat(s1, s2):
593 return False
593 return False
594
594
595 def shellquote(s):
595 def shellquote(s):
596 return '"%s"' % s.replace('"', '\\"')
596 return '"%s"' % s.replace('"', '\\"')
597
597
598 def explain_exit(code):
598 def explain_exit(code):
599 return _("exited with status %d") % code, code
599 return _("exited with status %d") % code, code
600
600
601 try:
601 try:
602 # override functions with win32 versions if possible
602 # override functions with win32 versions if possible
603 from util_win32 import *
603 from util_win32 import *
604 if not is_win_9x():
604 if not is_win_9x():
605 posixfile = posixfile_nt
605 posixfile = posixfile_nt
606 except ImportError:
606 except ImportError:
607 pass
607 pass
608
608
609 else:
609 else:
610 nulldev = '/dev/null'
610 nulldev = '/dev/null'
611
611
612 def rcfiles(path):
612 def rcfiles(path):
613 rcs = [os.path.join(path, 'hgrc')]
613 rcs = [os.path.join(path, 'hgrc')]
614 rcdir = os.path.join(path, 'hgrc.d')
614 rcdir = os.path.join(path, 'hgrc.d')
615 try:
615 try:
616 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
616 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
617 if f.endswith(".rc")])
617 if f.endswith(".rc")])
618 except OSError, inst: pass
618 except OSError:
619 pass
619 return rcs
620 return rcs
620
621
621 def os_rcpath():
622 def os_rcpath():
622 '''return default os-specific hgrc search path'''
623 '''return default os-specific hgrc search path'''
623 path = []
624 path = []
624 # old mod_python does not set sys.argv
625 # old mod_python does not set sys.argv
625 if len(getattr(sys, 'argv', [])) > 0:
626 if len(getattr(sys, 'argv', [])) > 0:
626 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
627 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
627 '/../etc/mercurial'))
628 '/../etc/mercurial'))
628 path.extend(rcfiles('/etc/mercurial'))
629 path.extend(rcfiles('/etc/mercurial'))
629 path.append(os.path.expanduser('~/.hgrc'))
630 path.append(os.path.expanduser('~/.hgrc'))
630 path = [os.path.normpath(f) for f in path]
631 path = [os.path.normpath(f) for f in path]
631 return path
632 return path
632
633
633 def parse_patch_output(output_line):
634 def parse_patch_output(output_line):
634 """parses the output produced by patch and returns the file name"""
635 """parses the output produced by patch and returns the file name"""
635 pf = output_line[14:]
636 pf = output_line[14:]
636 if pf.startswith("'") and pf.endswith("'") and " " in pf:
637 if pf.startswith("'") and pf.endswith("'") and " " in pf:
637 pf = pf[1:-1] # Remove the quotes
638 pf = pf[1:-1] # Remove the quotes
638 return pf
639 return pf
639
640
640 def is_exec(f, last):
641 def is_exec(f, last):
641 """check whether a file is executable"""
642 """check whether a file is executable"""
642 return (os.lstat(f).st_mode & 0100 != 0)
643 return (os.lstat(f).st_mode & 0100 != 0)
643
644
644 def set_exec(f, mode):
645 def set_exec(f, mode):
645 s = os.lstat(f).st_mode
646 s = os.lstat(f).st_mode
646 if (s & 0100 != 0) == mode:
647 if (s & 0100 != 0) == mode:
647 return
648 return
648 if mode:
649 if mode:
649 # Turn on +x for every +r bit when making a file executable
650 # Turn on +x for every +r bit when making a file executable
650 # and obey umask.
651 # and obey umask.
651 umask = os.umask(0)
652 umask = os.umask(0)
652 os.umask(umask)
653 os.umask(umask)
653 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
654 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
654 else:
655 else:
655 os.chmod(f, s & 0666)
656 os.chmod(f, s & 0666)
656
657
657 def set_binary(fd):
658 def set_binary(fd):
658 pass
659 pass
659
660
660 def pconvert(path):
661 def pconvert(path):
661 return path
662 return path
662
663
663 def localpath(path):
664 def localpath(path):
664 return path
665 return path
665
666
666 normpath = os.path.normpath
667 normpath = os.path.normpath
667 samestat = os.path.samestat
668 samestat = os.path.samestat
668
669
669 def makelock(info, pathname):
670 def makelock(info, pathname):
670 try:
671 try:
671 os.symlink(info, pathname)
672 os.symlink(info, pathname)
672 except OSError, why:
673 except OSError, why:
673 if why.errno == errno.EEXIST:
674 if why.errno == errno.EEXIST:
674 raise
675 raise
675 else:
676 else:
676 _makelock_file(info, pathname)
677 _makelock_file(info, pathname)
677
678
678 def readlock(pathname):
679 def readlock(pathname):
679 try:
680 try:
680 return os.readlink(pathname)
681 return os.readlink(pathname)
681 except OSError, why:
682 except OSError, why:
682 if why.errno == errno.EINVAL:
683 if why.errno == errno.EINVAL:
683 return _readlock_file(pathname)
684 return _readlock_file(pathname)
684 else:
685 else:
685 raise
686 raise
686
687
687 def shellquote(s):
688 def shellquote(s):
688 return "'%s'" % s.replace("'", "'\\''")
689 return "'%s'" % s.replace("'", "'\\''")
689
690
690 def testpid(pid):
691 def testpid(pid):
691 '''return False if pid dead, True if running or not sure'''
692 '''return False if pid dead, True if running or not sure'''
692 try:
693 try:
693 os.kill(pid, 0)
694 os.kill(pid, 0)
694 return True
695 return True
695 except OSError, inst:
696 except OSError, inst:
696 return inst.errno != errno.ESRCH
697 return inst.errno != errno.ESRCH
697
698
698 def explain_exit(code):
699 def explain_exit(code):
699 """return a 2-tuple (desc, code) describing a process's status"""
700 """return a 2-tuple (desc, code) describing a process's status"""
700 if os.WIFEXITED(code):
701 if os.WIFEXITED(code):
701 val = os.WEXITSTATUS(code)
702 val = os.WEXITSTATUS(code)
702 return _("exited with status %d") % val, val
703 return _("exited with status %d") % val, val
703 elif os.WIFSIGNALED(code):
704 elif os.WIFSIGNALED(code):
704 val = os.WTERMSIG(code)
705 val = os.WTERMSIG(code)
705 return _("killed by signal %d") % val, val
706 return _("killed by signal %d") % val, val
706 elif os.WIFSTOPPED(code):
707 elif os.WIFSTOPPED(code):
707 val = os.WSTOPSIG(code)
708 val = os.WSTOPSIG(code)
708 return _("stopped by signal %d") % val, val
709 return _("stopped by signal %d") % val, val
709 raise ValueError(_("invalid exit code"))
710 raise ValueError(_("invalid exit code"))
710
711
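The `code` argument is the raw 16-bit wait status from os.system/os.wait, hence the os.WIF* macros to pick it apart. For example, on POSIX:

    import os

    print(os.WIFEXITED(256))    # True: the high byte holds the exit code
    print(os.WEXITSTATUS(256))  # 1
    print(os.WIFSIGNALED(15))   # True: the low bits hold the signal number
    print(os.WTERMSIG(15))      # 15 (SIGTERM)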
711 def opener(base, audit=True):
712 def opener(base, audit=True):
712 """
713 """
713 return a function that opens files relative to base
714 return a function that opens files relative to base
714
715
715 this function is used to hide the details of COW semantics and
716 this function is used to hide the details of COW semantics and
716 remote file access from higher level code.
717 remote file access from higher level code.
717 """
718 """
718 p = base
719 p = base
719 audit_p = audit
720 audit_p = audit
720
721
721 def mktempcopy(name):
722 def mktempcopy(name):
722 d, fn = os.path.split(name)
723 d, fn = os.path.split(name)
723 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
724 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
724 os.close(fd)
725 os.close(fd)
725 ofp = posixfile(temp, "wb")
726 ofp = posixfile(temp, "wb")
726 try:
727 try:
727 try:
728 try:
728 ifp = posixfile(name, "rb")
729 ifp = posixfile(name, "rb")
729 except IOError, inst:
730 except IOError, inst:
730 if not getattr(inst, 'filename', None):
731 if not getattr(inst, 'filename', None):
731 inst.filename = name
732 inst.filename = name
732 raise
733 raise
733 for chunk in filechunkiter(ifp):
734 for chunk in filechunkiter(ifp):
734 ofp.write(chunk)
735 ofp.write(chunk)
735 ifp.close()
736 ifp.close()
736 ofp.close()
737 ofp.close()
737 except:
738 except:
738 try: os.unlink(temp)
739 try: os.unlink(temp)
739 except: pass
740 except: pass
740 raise
741 raise
741 st = os.lstat(name)
742 st = os.lstat(name)
742 os.chmod(temp, st.st_mode)
743 os.chmod(temp, st.st_mode)
743 return temp
744 return temp
744
745
745 class atomictempfile(posixfile):
746 class atomictempfile(posixfile):
746 """the file will only be copied when rename is called"""
747 """the file will only be copied when rename is called"""
747 def __init__(self, name, mode):
748 def __init__(self, name, mode):
748 self.__name = name
749 self.__name = name
749 self.temp = mktempcopy(name)
750 self.temp = mktempcopy(name)
750 posixfile.__init__(self, self.temp, mode)
751 posixfile.__init__(self, self.temp, mode)
751 def rename(self):
752 def rename(self):
752 if not self.closed:
753 if not self.closed:
753 posixfile.close(self)
754 posixfile.close(self)
754 rename(self.temp, localpath(self.__name))
755 rename(self.temp, localpath(self.__name))
755 def __del__(self):
756 def __del__(self):
756 if not self.closed:
757 if not self.closed:
757 try:
758 try:
758 os.unlink(self.temp)
759 os.unlink(self.temp)
759 except: pass
760 except: pass
760 posixfile.close(self)
761 posixfile.close(self)
761
762
762 class atomicfile(atomictempfile):
763 class atomicfile(atomictempfile):
763 """the file will only be copied on close"""
764 """the file will only be copied on close"""
764 def __init__(self, name, mode):
765 def __init__(self, name, mode):
765 atomictempfile.__init__(self, name, mode)
766 atomictempfile.__init__(self, name, mode)
766 def close(self):
767 def close(self):
767 self.rename()
768 self.rename()
768 def __del__(self):
769 def __del__(self):
769 self.rename()
770 self.rename()
770
771
771 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
772 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
772 if audit_p:
773 if audit_p:
773 audit_path(path)
774 audit_path(path)
774 f = os.path.join(p, path)
775 f = os.path.join(p, path)
775
776
776 if not text:
777 if not text:
777 mode += "b" # for that other OS
778 mode += "b" # for that other OS
778
779
779 if mode[0] != "r":
780 if mode[0] != "r":
780 try:
781 try:
781 nlink = nlinks(f)
782 nlink = nlinks(f)
782 except OSError:
783 except OSError:
783 d = os.path.dirname(f)
784 d = os.path.dirname(f)
784 if not os.path.isdir(d):
785 if not os.path.isdir(d):
785 os.makedirs(d)
786 os.makedirs(d)
786 else:
787 else:
787 if atomic:
788 if atomic:
788 return atomicfile(f, mode)
789 return atomicfile(f, mode)
789 elif atomictemp:
790 elif atomictemp:
790 return atomictempfile(f, mode)
791 return atomictempfile(f, mode)
791 if nlink > 1:
792 if nlink > 1:
792 rename(mktempcopy(f), f)
793 rename(mktempcopy(f), f)
793 return posixfile(f, mode)
794 return posixfile(f, mode)
794
795
795 return o
796 return o
796
797
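In use, the returned callable behaves like open() rooted at `base`, with the atomic flavours layered on top. A hedged usage sketch against the definitions above (the repository path is a placeholder):

    wopener = opener('/path/to/repo')

    # audit_path() refuses '.hg/...' and anything containing '..'
    f = wopener('data/notes.txt', 'w')
    f.write('hello')
    f.close()

    # atomictemp: writes go to a side copy, visible only once rename() runs
    f = wopener('data/notes.txt', 'w', atomictemp=True)
    f.write('replaced atomically')
    f.rename()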
797 class chunkbuffer(object):
798 class chunkbuffer(object):
798 """Allow arbitrary sized chunks of data to be efficiently read from an
799 """Allow arbitrary sized chunks of data to be efficiently read from an
799 iterator over chunks of arbitrary size."""
800 iterator over chunks of arbitrary size."""
800
801
801 def __init__(self, in_iter, targetsize = 2**16):
802 def __init__(self, in_iter, targetsize = 2**16):
802 """in_iter is the iterator that's iterating over the input chunks.
803 """in_iter is the iterator that's iterating over the input chunks.
803 targetsize is how big a buffer to try to maintain."""
804 targetsize is how big a buffer to try to maintain."""
804 self.in_iter = iter(in_iter)
805 self.in_iter = iter(in_iter)
805 self.buf = ''
806 self.buf = ''
806 self.targetsize = int(targetsize)
807 self.targetsize = int(targetsize)
807 if self.targetsize <= 0:
808 if self.targetsize <= 0:
808 raise ValueError(_("targetsize must be greater than 0, was %d") %
809 raise ValueError(_("targetsize must be greater than 0, was %d") %
809 targetsize)
810 targetsize)
810 self.iterempty = False
811 self.iterempty = False
811
812
812 def fillbuf(self):
813 def fillbuf(self):
813 """Ignore target size; read every chunk from iterator until empty."""
814 """Ignore target size; read every chunk from iterator until empty."""
814 if not self.iterempty:
815 if not self.iterempty:
815 collector = cStringIO.StringIO()
816 collector = cStringIO.StringIO()
816 collector.write(self.buf)
817 collector.write(self.buf)
817 for ch in self.in_iter:
818 for ch in self.in_iter:
818 collector.write(ch)
819 collector.write(ch)
819 self.buf = collector.getvalue()
820 self.buf = collector.getvalue()
820 self.iterempty = True
821 self.iterempty = True
821
822
822 def read(self, l):
823 def read(self, l):
823 """Read L bytes of data from the iterator of chunks of data.
824 """Read L bytes of data from the iterator of chunks of data.
824 Returns less than L bytes if the iterator runs dry."""
825 Returns less than L bytes if the iterator runs dry."""
825 if l > len(self.buf) and not self.iterempty:
826 if l > len(self.buf) and not self.iterempty:
826 # Clamp to a multiple of self.targetsize
827 # Clamp to a multiple of self.targetsize
827 targetsize = self.targetsize * ((l // self.targetsize) + 1)
828 targetsize = self.targetsize * ((l // self.targetsize) + 1)
828 collector = cStringIO.StringIO()
829 collector = cStringIO.StringIO()
829 collector.write(self.buf)
830 collector.write(self.buf)
830 collected = len(self.buf)
831 collected = len(self.buf)
831 for chunk in self.in_iter:
832 for chunk in self.in_iter:
832 collector.write(chunk)
833 collector.write(chunk)
833 collected += len(chunk)
834 collected += len(chunk)
834 if collected >= targetsize:
835 if collected >= targetsize:
835 break
836 break
836 if collected < targetsize:
837 if collected < targetsize:
837 self.iterempty = True
838 self.iterempty = True
838 self.buf = collector.getvalue()
839 self.buf = collector.getvalue()
839 s, self.buf = self.buf[:l], buffer(self.buf, l)
840 s, self.buf = self.buf[:l], buffer(self.buf, l)
840 return s
841 return s
841
842
842 def filechunkiter(f, size=65536, limit=None):
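A usage sketch (Python 2 era, matching the `buffer` built-in used above): feed it unevenly sized chunks and read back whatever sizes you like:

    chunks = iter(['abc', 'defg', 'h'])
    cb = chunkbuffer(chunks, targetsize=4)
    print(cb.read(2))   # 'ab'
    print(cb.read(5))   # 'cdefg'
    print(cb.read(10))  # 'h' (iterator ran dry, so the read comes up short)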
843 def filechunkiter(f, size=65536, limit=None):
843 """Create a generator that produces the data in the file size
844 """Create a generator that produces the data in the file size
844 (default 65536) bytes at a time, up to optional limit (default is
845 (default 65536) bytes at a time, up to optional limit (default is
845 to read all data). Chunks may be less than size bytes if the
846 to read all data). Chunks may be less than size bytes if the
846 chunk is the last chunk in the file, or the file is a socket or
847 chunk is the last chunk in the file, or the file is a socket or
847 some other type of file that sometimes reads less data than is
848 some other type of file that sometimes reads less data than is
848 requested."""
849 requested."""
849 assert size >= 0
850 assert size >= 0
850 assert limit is None or limit >= 0
851 assert limit is None or limit >= 0
851 while True:
852 while True:
852 if limit is None: nbytes = size
853 if limit is None: nbytes = size
853 else: nbytes = min(limit, size)
854 else: nbytes = min(limit, size)
854 s = nbytes and f.read(nbytes)
855 s = nbytes and f.read(nbytes)
855 if not s: break
856 if not s: break
856 if limit: limit -= len(s)
857 if limit: limit -= len(s)
857 yield s
858 yield s
858
859
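Typical use is streaming a large file without holding it in memory, which is exactly how mktempcopy() above consumes it (file names here are placeholders):

    src = posixfile('big-input', 'rb')
    dst = posixfile('big-copy', 'wb')
    for chunk in filechunkiter(src, size=8192):
        dst.write(chunk)
    src.close()
    dst.close()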
859 def makedate():
860 def makedate():
860 lt = time.localtime()
861 lt = time.localtime()
861 if lt[8] == 1 and time.daylight:
862 if lt[8] == 1 and time.daylight:
862 tz = time.altzone
863 tz = time.altzone
863 else:
864 else:
864 tz = time.timezone
865 tz = time.timezone
865 return time.mktime(lt), tz
866 return time.mktime(lt), tz
866
867
867 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
868 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
868 """represent a (unixtime, offset) tuple as a localized time.
869 """represent a (unixtime, offset) tuple as a localized time.
869 unixtime is seconds since the epoch, and offset is the time zone's
870 unixtime is seconds since the epoch, and offset is the time zone's
870 number of seconds away from UTC. if timezone is false, do not
871 number of seconds away from UTC. if timezone is false, do not
871 append time zone to string."""
872 append time zone to string."""
872 t, tz = date or makedate()
873 t, tz = date or makedate()
873 s = time.strftime(format, time.gmtime(float(t) - tz))
874 s = time.strftime(format, time.gmtime(float(t) - tz))
874 if timezone:
875 if timezone:
875 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
876 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
876 return s
877 return s
877
878
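A few illustrative calls; the first tuple element is seconds since the epoch, the second the zone's offset in seconds west of UTC (as in time.timezone):

    print(datestr((0, 0)))                  # Thu Jan 01 00:00:00 1970 +0000
    print(datestr((0, 18000)))              # Wed Dec 31 19:00:00 1969 -0500
    print(datestr((0, 0), timezone=False))  # drops the ' +0000' suffix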
878 def strdate(string, format='%a %b %d %H:%M:%S %Y'):
879 def strdate(string, format='%a %b %d %H:%M:%S %Y'):
879 """parse a localized time string and return a (unixtime, offset) tuple.
880 """parse a localized time string and return a (unixtime, offset) tuple.
880 if the string cannot be parsed, ValueError is raised."""
881 if the string cannot be parsed, ValueError is raised."""
881 def hastimezone(string):
882 def hastimezone(string):
882 return (string[-4:].isdigit() and
883 return (string[-4:].isdigit() and
883 (string[-5] == '+' or string[-5] == '-') and
884 (string[-5] == '+' or string[-5] == '-') and
884 string[-6].isspace())
885 string[-6].isspace())
885
886
886 if hastimezone(string):
887 if hastimezone(string):
887 date, tz = string[:-6], string[-5:]
888 date, tz = string[:-6], string[-5:]
888 tz = int(tz)
889 tz = int(tz)
889 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
890 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
890 else:
891 else:
891 date, offset = string, 0
892 date, offset = string, 0
892 when = int(time.mktime(time.strptime(date, format))) + offset
893 when = int(time.mktime(time.strptime(date, format))) + offset
893 return when, offset
894 return when, offset
894
895
895 def parsedate(string, formats=None):
896 def parsedate(string, formats=None):
896 """parse a localized time string and return a (unixtime, offset) tuple.
897 """parse a localized time string and return a (unixtime, offset) tuple.
897 The date may be a "unixtime offset" string or in one of the specified
898 The date may be a "unixtime offset" string or in one of the specified
898 formats."""
899 formats."""
899 if not formats:
900 if not formats:
900 formats = defaultdateformats
901 formats = defaultdateformats
901 try:
902 try:
902 when, offset = map(int, string.split(' '))
903 when, offset = map(int, string.split(' '))
903 except ValueError:
904 except ValueError:
904 for format in formats:
905 for format in formats:
905 try:
906 try:
906 when, offset = strdate(string, format)
907 when, offset = strdate(string, format)
907 except ValueError:
908 except ValueError:
908 pass
909 pass
909 else:
910 else:
910 break
911 break
911 else:
912 else:
912 raise ValueError(_('invalid date: %r') % string)
913 raise ValueError(_('invalid date: %r') % string)
913 # validate explicit (probably user-specified) date and
914 # validate explicit (probably user-specified) date and
914 # time zone offset. values must fit in signed 32 bits for
915 # time zone offset. values must fit in signed 32 bits for
915 # current 32-bit linux runtimes. timezones go from UTC-12
916 # current 32-bit linux runtimes. timezones go from UTC-12
916 # to UTC+14
917 # to UTC+14
917 if abs(when) > 0x7fffffff:
918 if abs(when) > 0x7fffffff:
918 raise ValueError(_('date exceeds 32 bits: %d') % when)
919 raise ValueError(_('date exceeds 32 bits: %d') % when)
919 if offset < -50400 or offset > 43200:
920 if offset < -50400 or offset > 43200:
920 raise ValueError(_('impossible time zone offset: %d') % offset)
921 raise ValueError(_('impossible time zone offset: %d') % offset)
921 return when, offset
922 return when, offset
922
923
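Illustrative calls: the fast path accepts Mercurial's internal "unixtime offset" form directly, and only falls back to strdate() with each format otherwise (defaultdateformats is defined elsewhere in this module):

    print(parsedate('0 0'))               # (0, 0)
    print(parsedate('1161973373 25200'))  # (1161973373, 25200)
    parsedate('13 99999999')              # raises ValueError: impossible offset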
923 def shortuser(user):
924 def shortuser(user):
924 """Return a short representation of a user name or email address."""
925 """Return a short representation of a user name or email address."""
925 f = user.find('@')
926 f = user.find('@')
926 if f >= 0:
927 if f >= 0:
927 user = user[:f]
928 user = user[:f]
928 f = user.find('<')
929 f = user.find('<')
929 if f >= 0:
930 if f >= 0:
930 user = user[f+1:]
931 user = user[f+1:]
931 return user
932 return user
932
933
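For example:

    print(shortuser('John Doe <jdoe@example.com>'))  # jdoe
    print(shortuser('jdoe@example.com'))             # jdoe
    print(shortuser('jdoe'))                         # jdoe (unchanged)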
933 def walkrepos(path):
934 def walkrepos(path):
934 '''yield every hg repository under path, recursively.'''
935 '''yield every hg repository under path, recursively.'''
935 def errhandler(err):
936 def errhandler(err):
936 if err.filename == path:
937 if err.filename == path:
937 raise err
938 raise err
938
939
939 for root, dirs, files in os.walk(path, onerror=errhandler):
940 for root, dirs, files in os.walk(path, onerror=errhandler):
940 for d in dirs:
941 for d in dirs:
941 if d == '.hg':
942 if d == '.hg':
942 yield root
943 yield root
943 dirs[:] = []
944 dirs[:] = []
944 break
945 break
945
946
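Pruning via dirs[:] = [] means the walk never descends into a repository once found, so repositories nested under another working copy are not reported. Usage (the path is a placeholder):

    for repo in walkrepos('/srv/hg'):
        print(repo)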
946 _rcpath = None
947 _rcpath = None
947
948
948 def rcpath():
949 def rcpath():
949 '''return hgrc search path. if env var HGRCPATH is set, use it.
950 '''return hgrc search path. if env var HGRCPATH is set, use it.
950 for each item in path, if directory, use files ending in .rc,
951 for each item in path, if directory, use files ending in .rc,
951 else use item.
952 else use item.
952 make HGRCPATH empty to only look in .hg/hgrc of current repo.
953 make HGRCPATH empty to only look in .hg/hgrc of current repo.
953 if no HGRCPATH, use default os-specific path.'''
954 if no HGRCPATH, use default os-specific path.'''
954 global _rcpath
955 global _rcpath
955 if _rcpath is None:
956 if _rcpath is None:
956 if 'HGRCPATH' in os.environ:
957 if 'HGRCPATH' in os.environ:
957 _rcpath = []
958 _rcpath = []
958 for p in os.environ['HGRCPATH'].split(os.pathsep):
959 for p in os.environ['HGRCPATH'].split(os.pathsep):
959 if not p: continue
960 if not p: continue
960 if os.path.isdir(p):
961 if os.path.isdir(p):
961 for f in os.listdir(p):
962 for f in os.listdir(p):
962 if f.endswith('.rc'):
963 if f.endswith('.rc'):
963 _rcpath.append(os.path.join(p, f))
964 _rcpath.append(os.path.join(p, f))
964 else:
965 else:
965 _rcpath.append(p)
966 _rcpath.append(p)
966 else:
967 else:
967 _rcpath = os_rcpath()
968 _rcpath = os_rcpath()
968 return _rcpath
969 return _rcpath
969
970
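Note the caching: _rcpath is computed once per process, so HGRCPATH must be set before the first call. Directories contribute their '*.rc' files, other entries are used verbatim, and empty entries are skipped (the paths below are placeholders):

    import os
    os.environ['HGRCPATH'] = os.pathsep.join(['/etc/mercurial-alt',
                                              '/home/user/extra.rc'])
    print(rcpath())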
970 def bytecount(nbytes):
971 def bytecount(nbytes):
971 '''return byte count formatted as readable string, with units'''
972 '''return byte count formatted as readable string, with units'''
972
973
973 units = (
974 units = (
974 (100, 1<<30, _('%.0f GB')),
975 (100, 1<<30, _('%.0f GB')),
975 (10, 1<<30, _('%.1f GB')),
976 (10, 1<<30, _('%.1f GB')),
976 (1, 1<<30, _('%.2f GB')),
977 (1, 1<<30, _('%.2f GB')),
977 (100, 1<<20, _('%.0f MB')),
978 (100, 1<<20, _('%.0f MB')),
978 (10, 1<<20, _('%.1f MB')),
979 (10, 1<<20, _('%.1f MB')),
979 (1, 1<<20, _('%.2f MB')),
980 (1, 1<<20, _('%.2f MB')),
980 (100, 1<<10, _('%.0f KB')),
981 (100, 1<<10, _('%.0f KB')),
981 (10, 1<<10, _('%.1f KB')),
982 (10, 1<<10, _('%.1f KB')),
982 (1, 1<<10, _('%.2f KB')),
983 (1, 1<<10, _('%.2f KB')),
983 (1, 1, _('%.0f bytes')),
984 (1, 1, _('%.0f bytes')),
984 )
985 )
985
986
986 for multiplier, divisor, format in units:
987 for multiplier, divisor, format in units:
987 if nbytes >= divisor * multiplier:
988 if nbytes >= divisor * multiplier:
988 return format % (nbytes / float(divisor))
989 return format % (nbytes / float(divisor))
989 return units[-1][2] % nbytes
990 return units[-1][2] % nbytes
990
991
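The table is ordered so the first row whose threshold (multiplier * divisor) fits wins, giving roughly three significant figures throughout:

    print(bytecount(512))        # 512 bytes
    print(bytecount(1024))       # 1.00 KB
    print(bytecount(100000))     # 97.7 KB
    print(bytecount(3 * 2**30))  # 3.00 GB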
991 def drop_scheme(scheme, path):
992 def drop_scheme(scheme, path):
992 sc = scheme + ':'
993 sc = scheme + ':'
993 if path.startswith(sc):
994 if path.startswith(sc):
994 path = path[len(sc):]
995 path = path[len(sc):]
995 if path.startswith('//'):
996 if path.startswith('//'):
996 path = path[2:]
997 path = path[2:]
997 return path
998 return path
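For example:

    print(drop_scheme('file', 'file:///var/repo'))  # /var/repo
    print(drop_scheme('file', 'http://host/repo'))  # unchanged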