##// END OF EJS Templates
merge with stable
Matt Mackall -
r23244:18cc87e4 merge default
parent child Browse files
Show More
@@ -1,385 +1,385 b''
1 # git.py - git support for the convert extension
1 # git.py - git support for the convert extension
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import subprocess
9 import subprocess
10 from mercurial import util, config
10 from mercurial import util, config
11 from mercurial.node import hex, nullid
11 from mercurial.node import hex, nullid
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13
13
14 from common import NoRepo, commit, converter_source, checktool
14 from common import NoRepo, commit, converter_source, checktool
15
15
class submodule(object):
    """A git submodule reference.

    Holds the repo-relative path of the submodule, the git commit node it
    is pinned to, and the clone URL, and renders the corresponding
    .hgsub / .hgsubstate entry lines.
    """
    def __init__(self, path, node, url):
        self.path = path  # repo-relative submodule path
        self.node = node  # git commit hash the submodule points at
        self.url = url    # submodule clone URL

    def hgsub(self):
        """Return this submodule's .hgsub entry line."""
        return "%s = [git]%s" % (self.path, self.url)

    def hgsubstate(self):
        """Return this submodule's .hgsubstate entry line."""
        return "%s %s" % (self.node, self.path)
27
27
class convert_git(converter_source):
    """Convert-extension source reading history out of a git repository.

    Shells out to the ``git`` command-line tool; all communication with
    the repository goes through gitopen()/gitpipe()/gitread().
    """
    # Windows does not support GIT_DIR= construct while other systems
    # cannot remove environment variable. Just assume none have
    # both issues.
    if util.safehasattr(os, 'unsetenv'):
        def gitopen(self, s, err=None):
            """Run git command s with GIT_DIR set, returning its stdout.

            err=subprocess.PIPE discards stderr; err=subprocess.STDOUT
            folds stderr into the returned stream.
            """
            prevgitdir = os.environ.get('GIT_DIR')
            os.environ['GIT_DIR'] = self.path
            try:
                if err == subprocess.PIPE:
                    (stdin, stdout, stderr) = util.popen3(s)
                    return stdout
                elif err == subprocess.STDOUT:
                    return self.popen_with_stderr(s)
                else:
                    return util.popen(s, 'rb')
            finally:
                # restore the caller's GIT_DIR whatever happens
                if prevgitdir is None:
                    del os.environ['GIT_DIR']
                else:
                    os.environ['GIT_DIR'] = prevgitdir

        def gitpipe(self, s):
            """Run git command s with GIT_DIR set, returning
            (stdin, stdout, stderr) pipes."""
            prevgitdir = os.environ.get('GIT_DIR')
            os.environ['GIT_DIR'] = self.path
            try:
                return util.popen3(s)
            finally:
                if prevgitdir is None:
                    del os.environ['GIT_DIR']
                else:
                    os.environ['GIT_DIR'] = prevgitdir

    else:
        def gitopen(self, s, err=None):
            """Run git command s (GIT_DIR passed on the command line),
            returning its stdout."""
            if err == subprocess.PIPE:
                (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s))
                return so
            elif err == subprocess.STDOUT:
                return self.popen_with_stderr(s)
            else:
                return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')

        def gitpipe(self, s):
            """Run git command s, returning (stdin, stdout, stderr) pipes."""
            return util.popen3('GIT_DIR=%s %s' % (self.path, s))

    def popen_with_stderr(self, s):
        """Spawn command s with stderr redirected into stdout; return the
        combined stdout stream."""
        p = subprocess.Popen(s, shell=True, bufsize=-1,
                             close_fds=util.closefds,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             universal_newlines=False,
                             env=None)
        return p.stdout

    def gitread(self, s):
        """Run git command s and return (output, exit-status)."""
        fh = self.gitopen(s)
        data = fh.read()
        return data, fh.close()

    def __init__(self, ui, path, rev=None):
        super(convert_git, self).__init__(ui, path, rev=rev)

        # accept either a working copy (containing .git) or a bare repo
        if os.path.isdir(path + "/.git"):
            path += "/.git"
        if not os.path.exists(path + "/objects"):
            raise NoRepo(_("%s does not look like a Git repository") % path)

        # The default value (50) is based on the default for 'git diff'.
        similarity = ui.configint('convert', 'git.similarity', default=50)
        if similarity < 0 or similarity > 100:
            raise util.Abort(_('similarity must be between 0 and 100'))
        if similarity > 0:
            self.simopt = '-C%d%%' % similarity
            findcopiesharder = ui.configbool('convert', 'git.findcopiesharder',
                                             False)
            if findcopiesharder:
                self.simopt += ' --find-copies-harder'
        else:
            self.simopt = ''

        checktool('git', 'git')

        self.path = path
        self.submodules = []

        # long-lived cat-file pipe reused by catfile() for every object read
        self.catfilepipe = self.gitpipe('git cat-file --batch')

    def after(self):
        """Close the persistent cat-file pipes."""
        for f in self.catfilepipe:
            f.close()

    def getheads(self):
        """Return the list of head revisions (all branches/remotes, or
        just self.rev when a revision was requested)."""
        if not self.rev:
            heads, ret = self.gitread('git rev-parse --branches --remotes')
            heads = heads.splitlines()
        else:
            heads, ret = self.gitread("git rev-parse --verify %s" % self.rev)
            heads = [heads[:-1]]
        if ret:
            raise util.Abort(_('cannot retrieve git heads'))
        return heads

    def catfile(self, rev, type):
        """Return the raw content of git object rev, checking it has the
        expected type ('blob', 'commit', ...)."""
        if rev == hex(nullid):
            raise IOError
        self.catfilepipe[0].write(rev+'\n')
        self.catfilepipe[0].flush()
        info = self.catfilepipe[1].readline().split()
        if info[1] != type:
            raise util.Abort(_('cannot read %r object at %s') % (type, rev))
        size = int(info[2])
        data = self.catfilepipe[1].read(size)
        if len(data) < size:
            raise util.Abort(_('cannot read %r object at %s: unexpected size')
                             % (type, rev))
        # read the trailing newline
        self.catfilepipe[1].read(1)
        return data

    def getfile(self, name, rev):
        """Return (data, mode) for file name at rev; .hgsub/.hgsubstate
        are synthesized from the tracked submodules."""
        if rev == hex(nullid):
            return None, None
        if name == '.hgsub':
            data = '\n'.join([m.hgsub() for m in self.submoditer()])
            mode = ''
        elif name == '.hgsubstate':
            data = '\n'.join([m.hgsubstate() for m in self.submoditer()])
            mode = ''
        else:
            data = self.catfile(rev, "blob")
            mode = self.modecache[(name, rev)]
        return data, mode

    def submoditer(self):
        """Yield live submodules (node != null), sorted by path."""
        null = hex(nullid)
        for m in sorted(self.submodules, key=lambda p: p.path):
            if m.node != null:
                yield m

    def parsegitmodules(self, content):
        """Parse the formatted .gitmodules file, example file format:
        [submodule "sub"]\n
        \tpath = sub\n
        \turl = git://giturl\n
        """
        self.submodules = []
        c = config.config()
        # Each item in .gitmodules starts with \t that cant be parsed
        c.parse('.gitmodules', content.replace('\t',''))
        for sec in c.sections():
            s = c[sec]
            if 'url' in s and 'path' in s:
                self.submodules.append(submodule(s['path'], '', s['url']))

    def retrievegitmodules(self, version):
        """Populate self.submodules from .gitmodules at version, resolving
        each submodule's pinned node."""
        modules, ret = self.gitread("git show %s:%s" % (version, '.gitmodules'))
        if ret:
            raise util.Abort(_('cannot read submodules config file in %s') %
                             version)
        self.parsegitmodules(modules)
        for m in self.submodules:
            node, ret = self.gitread("git rev-parse %s:%s" % (version, m.path))
            if ret:
                # ignore submodules not present at this version
                continue
            m.node = node.strip()

    def getchanges(self, version, full):
        """Return (changes, copies) for commit version, parsing the
        NUL-separated output of 'git diff-tree -z'."""
        if full:
            raise util.Abort(_("convert from git do not support --full"))
        self.modecache = {}
        fh = self.gitopen("git diff-tree -z --root -m -r %s %s" % (
            self.simopt, version))
        changes = []
        copies = {}
        seen = set()
        entry = None
        subexists = [False]
        subdeleted = [False]
        difftree = fh.read().split('\x00')
        lcount = len(difftree)
        i = 0

        # lists (subexists/subdeleted) are used so the closure can mutate them
        def add(entry, f, isdest):
            seen.add(f)
            h = entry[3]
            p = (entry[1] == "100755")
            s = (entry[1] == "120000")
            renamesource = (not isdest and entry[4][0] == 'R')

            if f == '.gitmodules':
                subexists[0] = True
                if entry[4] == 'D' or renamesource:
                    subdeleted[0] = True
                    changes.append(('.hgsub', hex(nullid)))
                else:
                    changes.append(('.hgsub', ''))
            elif entry[1] == '160000' or entry[0] == ':160000':
                # gitlink (submodule) entries only flag submodule handling
                subexists[0] = True
            else:
                if renamesource:
                    h = hex(nullid)
                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
                changes.append((f, h))

        while i < lcount:
            l = difftree[i]
            i += 1
            if not entry:
                if not l.startswith(':'):
                    continue
                entry = l.split()
                continue
            f = l
            if f not in seen:
                add(entry, f, False)
            # A file can be copied multiple times, or modified and copied
            # simultaneously. So f can be repeated even if fdest isn't.
            if entry[4][0] in 'RC':
                # rename or copy: next line is the destination
                fdest = difftree[i]
                i += 1
                if fdest not in seen:
                    add(entry, fdest, True)
                    # .gitmodules isn't imported at all, so it being copied to
                    # and fro doesn't really make sense
                    if f != '.gitmodules' and fdest != '.gitmodules':
                        copies[fdest] = f
            entry = None
        if fh.close():
            raise util.Abort(_('cannot read changes in %s') % version)

        if subexists[0]:
            if subdeleted[0]:
                changes.append(('.hgsubstate', hex(nullid)))
            else:
                self.retrievegitmodules(version)
                changes.append(('.hgsubstate', ''))
        return (changes, copies)

    def getcommit(self, version):
        """Return a common.commit object parsed from git commit version."""
        c = self.catfile(version, "commit") # read the commit hash
        end = c.find("\n\n")
        message = c[end + 2:]
        message = self.recode(message)
        l = c[:end].splitlines()
        parents = []
        author = committer = None
        for e in l[1:]:
            n, v = e.split(" ", 1)
            if n == "author":
                p = v.split()
                tm, tz = p[-2:]
                author = " ".join(p[:-2])
                if author[0] == "<": author = author[1:-1]
                author = self.recode(author)
            if n == "committer":
                p = v.split()
                tm, tz = p[-2:]
                committer = " ".join(p[:-2])
                if committer[0] == "<": committer = committer[1:-1]
                committer = self.recode(committer)
            if n == "parent":
                parents.append(v)

        if committer and committer != author:
            message += "\ncommitter: %s\n" % committer
        # convert git's "+HHMM" timezone to an offset in seconds
        tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
        tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
        date = tm + " " + str(tz)

        c = commit(parents=parents, date=date, author=author, desc=message,
                   rev=version)
        return c

    def numcommits(self):
        """Return the total number of commits reachable from any ref."""
        return len([None for _ in self.gitopen('git rev-list --all')])

    def gettags(self):
        """Return {tagname: node}, preferring the peeled ('^{}') target of
        annotated tags over the tag object itself."""
        tags = {}
        alltags = {}
        fh = self.gitopen('git ls-remote --tags "%s"' % self.path,
                          err=subprocess.STDOUT)
        prefix = 'refs/tags/'

        # Build complete list of tags, both annotated and bare ones
        for line in fh:
            line = line.strip()
            if line.startswith("error:") or line.startswith("fatal:"):
                raise util.Abort(_('cannot read tags from %s') % self.path)
            node, tag = line.split(None, 1)
            if not tag.startswith(prefix):
                continue
            alltags[tag[len(prefix):]] = node
        if fh.close():
            raise util.Abort(_('cannot read tags from %s') % self.path)

        # Filter out tag objects for annotated tag refs
        for tag in alltags:
            if tag.endswith('^{}'):
                tags[tag[:-3]] = alltags[tag]
            else:
                if tag + '^{}' in alltags:
                    continue
                else:
                    tags[tag] = alltags[tag]

        return tags

    def getchangedfiles(self, version, i):
        """Return the files changed by version relative to its i-th parent
        (all parents when i is None)."""
        changes = []
        if i is None:
            fh = self.gitopen("git diff-tree --root -m -r %s" % version)
            for l in fh:
                if "\t" not in l:
                    continue
                m, f = l[:-1].split("\t")
                changes.append(f)
        else:
            fh = self.gitopen('git diff-tree --name-only --root -r %s '
                              '"%s^%s" --' % (version, version, i + 1))
            changes = [f.rstrip('\n') for f in fh]
        if fh.close():
            raise util.Abort(_('cannot read changes in %s') % version)

        return changes

    def getbookmarks(self):
        """Return {name: rev} for local and origin heads; origin heads are
        prefixed with 'remote/'."""
        bookmarks = {}

        # Interesting references in git are prefixed
        prefix = 'refs/heads/'
        prefixlen = len(prefix)

        # factor two commands
        gitcmd = { 'remote/': 'git ls-remote --heads origin',
                   '': 'git show-ref'}

        # Origin heads
        for reftype in gitcmd:
            try:
                fh = self.gitopen(gitcmd[reftype], err=subprocess.PIPE)
                for line in fh:
                    line = line.strip()
                    rev, name = line.split(None, 1)
                    if not name.startswith(prefix):
                        continue
                    name = '%s%s' % (reftype, name[prefixlen:])
                    bookmarks[name] = rev
            except Exception:
                # best effort: a repo without an 'origin' remote (or with no
                # refs) simply contributes no bookmarks
                pass

        return bookmarks

    def checkrevformat(self, revstr, mapname='splicemap'):
        """ git revision string is a 40 byte hex """
        self.checkhexformat(revstr, mapname)
@@ -1,831 +1,829 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import weakref
8 import weakref
9 from i18n import _
9 from i18n import _
10 from node import nullrev, nullid, hex, short
10 from node import nullrev, nullid, hex, short
11 import mdiff, util, dagutil
11 import mdiff, util, dagutil
12 import struct, os, bz2, zlib, tempfile
12 import struct, os, bz2, zlib, tempfile
13 import discovery, error, phases, branchmap
13 import discovery, error, phases, branchmap
14
14
15 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
15 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
16 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
16 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
17
17
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(s), n))
    return s
26
26
def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        # a length of 0 is the end-of-group marker; any other value <= 4
        # cannot hold its own 4-byte header and is corrupt
        if l:
            raise util.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)
35 return readexactly(stream, l - 4)
36
36
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the stored length includes the 4 header bytes themselves
    return struct.pack(">l", length + 4)
40
40
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)
44
44
class nocompress(object):
    """Pass-through 'compressor' used for uncompressed bundle types."""
    def compress(self, x):
        return x
    def flush(self):
        return ""
50
50
# maps bundle type name -> (on-disk header, compressor factory)
bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
62
62
def writebundle(cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        # from here on an error must remove the partially-written file
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in cg.getchunks():
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        cleanup = None  # success: keep the file
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
108
108
def decompressor(fh, alg):
    """Wrap stream fh in a decompressor for algorithm alg ('UN', 'GZ', 'BZ').

    'UN' returns fh unchanged; unknown algorithms abort.
    """
    if alg == 'UN':
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the stream header was consumed during type detection; feed the
            # magic back so the decompressor sees a complete bz2 stream
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
126
126
127 class cg1unpacker(object):
127 class cg1unpacker(object):
128 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
128 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
129 deltaheadersize = struct.calcsize(deltaheader)
129 deltaheadersize = struct.calcsize(deltaheader)
130 def __init__(self, fh, alg):
130 def __init__(self, fh, alg):
131 self._stream = decompressor(fh, alg)
131 self._stream = decompressor(fh, alg)
132 self._type = alg
132 self._type = alg
133 self.callback = None
133 self.callback = None
134 def compressed(self):
134 def compressed(self):
135 return self._type != 'UN'
135 return self._type != 'UN'
136 def read(self, l):
136 def read(self, l):
137 return self._stream.read(l)
137 return self._stream.read(l)
138 def seek(self, pos):
138 def seek(self, pos):
139 return self._stream.seek(pos)
139 return self._stream.seek(pos)
140 def tell(self):
140 def tell(self):
141 return self._stream.tell()
141 return self._stream.tell()
142 def close(self):
142 def close(self):
143 return self._stream.close()
143 return self._stream.close()
144
144
145 def chunklength(self):
145 def chunklength(self):
146 d = readexactly(self._stream, 4)
146 d = readexactly(self._stream, 4)
147 l = struct.unpack(">l", d)[0]
147 l = struct.unpack(">l", d)[0]
148 if l <= 4:
148 if l <= 4:
149 if l:
149 if l:
150 raise util.Abort(_("invalid chunk length %d") % l)
150 raise util.Abort(_("invalid chunk length %d") % l)
151 return 0
151 return 0
152 if self.callback:
152 if self.callback:
153 self.callback()
153 self.callback()
154 return l - 4
154 return l - 4
155
155
156 def changelogheader(self):
156 def changelogheader(self):
157 """v10 does not have a changelog header chunk"""
157 """v10 does not have a changelog header chunk"""
158 return {}
158 return {}
159
159
160 def manifestheader(self):
160 def manifestheader(self):
161 """v10 does not have a manifest header chunk"""
161 """v10 does not have a manifest header chunk"""
162 return {}
162 return {}
163
163
164 def filelogheader(self):
164 def filelogheader(self):
165 """return the header of the filelogs chunk, v10 only has the filename"""
165 """return the header of the filelogs chunk, v10 only has the filename"""
166 l = self.chunklength()
166 l = self.chunklength()
167 if not l:
167 if not l:
168 return {}
168 return {}
169 fname = readexactly(self._stream, l)
169 fname = readexactly(self._stream, l)
170 return {'filename': fname}
170 return {'filename': fname}
171
171
172 def _deltaheader(self, headertuple, prevnode):
172 def _deltaheader(self, headertuple, prevnode):
173 node, p1, p2, cs = headertuple
173 node, p1, p2, cs = headertuple
174 if prevnode is None:
174 if prevnode is None:
175 deltabase = p1
175 deltabase = p1
176 else:
176 else:
177 deltabase = prevnode
177 deltabase = prevnode
178 return node, p1, p2, deltabase, cs
178 return node, p1, p2, deltabase, cs
179
179
180 def deltachunk(self, prevnode):
180 def deltachunk(self, prevnode):
181 l = self.chunklength()
181 l = self.chunklength()
182 if not l:
182 if not l:
183 return {}
183 return {}
184 headerdata = readexactly(self._stream, self.deltaheadersize)
184 headerdata = readexactly(self._stream, self.deltaheadersize)
185 header = struct.unpack(self.deltaheader, headerdata)
185 header = struct.unpack(self.deltaheader, headerdata)
186 delta = readexactly(self._stream, l - self.deltaheadersize)
186 delta = readexactly(self._stream, l - self.deltaheadersize)
187 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
187 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
188 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
188 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
189 'deltabase': deltabase, 'delta': delta}
189 'deltabase': deltabase, 'delta': delta}
190
190
191 def getchunks(self):
191 def getchunks(self):
192 """returns all the chunks contains in the bundle
192 """returns all the chunks contains in the bundle
193
193
194 Used when you need to forward the binary stream to a file or another
194 Used when you need to forward the binary stream to a file or another
195 network API. To do so, it parse the changegroup data, otherwise it will
195 network API. To do so, it parse the changegroup data, otherwise it will
196 block in case of sshrepo because it don't know the end of the stream.
196 block in case of sshrepo because it don't know the end of the stream.
197 """
197 """
198 # an empty chunkgroup is the end of the changegroup
198 # an empty chunkgroup is the end of the changegroup
199 # a changegroup has at least 2 chunkgroups (changelog and manifest).
199 # a changegroup has at least 2 chunkgroups (changelog and manifest).
200 # after that, an empty chunkgroup is the end of the changegroup
200 # after that, an empty chunkgroup is the end of the changegroup
201 empty = False
201 empty = False
202 count = 0
202 count = 0
203 while not empty or count <= 2:
203 while not empty or count <= 2:
204 empty = True
204 empty = True
205 count += 1
205 count += 1
206 while True:
206 while True:
207 chunk = getchunk(self)
207 chunk = getchunk(self)
208 if not chunk:
208 if not chunk:
209 break
209 break
210 empty = False
210 empty = False
211 yield chunkheader(len(chunk))
211 yield chunkheader(len(chunk))
212 pos = 0
212 pos = 0
213 while pos < len(chunk):
213 while pos < len(chunk):
214 next = pos + 2**20
214 next = pos + 2**20
215 yield chunk[pos:next]
215 yield chunk[pos:next]
216 pos = next
216 pos = next
217 yield closechunk()
217 yield closechunk()
218
218
219 class cg2unpacker(cg1unpacker):
219 class cg2unpacker(cg1unpacker):
220 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
220 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
221 deltaheadersize = struct.calcsize(deltaheader)
221 deltaheadersize = struct.calcsize(deltaheader)
222
222
223 def _deltaheader(self, headertuple, prevnode):
223 def _deltaheader(self, headertuple, prevnode):
224 node, p1, p2, deltabase, cs = headertuple
224 node, p1, p2, deltabase, cs = headertuple
225 return node, p1, p2, deltabase, cs
225 return node, p1, p2, deltabase, cs
226
226
227 class headerlessfixup(object):
227 class headerlessfixup(object):
228 def __init__(self, fh, h):
228 def __init__(self, fh, h):
229 self._h = h
229 self._h = h
230 self._fh = fh
230 self._fh = fh
231 def read(self, n):
231 def read(self, n):
232 if self._h:
232 if self._h:
233 d, self._h = self._h[:n], self._h[n:]
233 d, self._h = self._h[:n], self._h[n:]
234 if len(d) < n:
234 if len(d) < n:
235 d += readexactly(self._fh, n - len(d))
235 d += readexactly(self._fh, n - len(d))
236 return d
236 return d
237 return readexactly(self._fh, n)
237 return readexactly(self._fh, n)
238
238
239 class cg1packer(object):
239 class cg1packer(object):
240 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
240 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
241 def __init__(self, repo, bundlecaps=None):
241 def __init__(self, repo, bundlecaps=None):
242 """Given a source repo, construct a bundler.
242 """Given a source repo, construct a bundler.
243
243
244 bundlecaps is optional and can be used to specify the set of
244 bundlecaps is optional and can be used to specify the set of
245 capabilities which can be used to build the bundle.
245 capabilities which can be used to build the bundle.
246 """
246 """
247 # Set of capabilities we can use to build the bundle.
247 # Set of capabilities we can use to build the bundle.
248 if bundlecaps is None:
248 if bundlecaps is None:
249 bundlecaps = set()
249 bundlecaps = set()
250 self._bundlecaps = bundlecaps
250 self._bundlecaps = bundlecaps
251 self._changelog = repo.changelog
251 self._changelog = repo.changelog
252 self._manifest = repo.manifest
252 self._manifest = repo.manifest
253 reorder = repo.ui.config('bundle', 'reorder', 'auto')
253 reorder = repo.ui.config('bundle', 'reorder', 'auto')
254 if reorder == 'auto':
254 if reorder == 'auto':
255 reorder = None
255 reorder = None
256 else:
256 else:
257 reorder = util.parsebool(reorder)
257 reorder = util.parsebool(reorder)
258 self._repo = repo
258 self._repo = repo
259 self._reorder = reorder
259 self._reorder = reorder
260 self._progress = repo.ui.progress
260 self._progress = repo.ui.progress
261 def close(self):
261 def close(self):
262 return closechunk()
262 return closechunk()
263
263
264 def fileheader(self, fname):
264 def fileheader(self, fname):
265 return chunkheader(len(fname)) + fname
265 return chunkheader(len(fname)) + fname
266
266
267 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
267 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
268 """Calculate a delta group, yielding a sequence of changegroup chunks
268 """Calculate a delta group, yielding a sequence of changegroup chunks
269 (strings).
269 (strings).
270
270
271 Given a list of changeset revs, return a set of deltas and
271 Given a list of changeset revs, return a set of deltas and
272 metadata corresponding to nodes. The first delta is
272 metadata corresponding to nodes. The first delta is
273 first parent(nodelist[0]) -> nodelist[0], the receiver is
273 first parent(nodelist[0]) -> nodelist[0], the receiver is
274 guaranteed to have this parent as it has all history before
274 guaranteed to have this parent as it has all history before
275 these changesets. In the case firstparent is nullrev the
275 these changesets. In the case firstparent is nullrev the
276 changegroup starts with a full revision.
276 changegroup starts with a full revision.
277
277
278 If units is not None, progress detail will be generated, units specifies
278 If units is not None, progress detail will be generated, units specifies
279 the type of revlog that is touched (changelog, manifest, etc.).
279 the type of revlog that is touched (changelog, manifest, etc.).
280 """
280 """
281 # if we don't have any revisions touched by these changesets, bail
281 # if we don't have any revisions touched by these changesets, bail
282 if len(nodelist) == 0:
282 if len(nodelist) == 0:
283 yield self.close()
283 yield self.close()
284 return
284 return
285
285
286 # for generaldelta revlogs, we linearize the revs; this will both be
286 # for generaldelta revlogs, we linearize the revs; this will both be
287 # much quicker and generate a much smaller bundle
287 # much quicker and generate a much smaller bundle
288 if (revlog._generaldelta and reorder is not False) or reorder:
288 if (revlog._generaldelta and reorder is not False) or reorder:
289 dag = dagutil.revlogdag(revlog)
289 dag = dagutil.revlogdag(revlog)
290 revs = set(revlog.rev(n) for n in nodelist)
290 revs = set(revlog.rev(n) for n in nodelist)
291 revs = dag.linearize(revs)
291 revs = dag.linearize(revs)
292 else:
292 else:
293 revs = sorted([revlog.rev(n) for n in nodelist])
293 revs = sorted([revlog.rev(n) for n in nodelist])
294
294
295 # add the parent of the first rev
295 # add the parent of the first rev
296 p = revlog.parentrevs(revs[0])[0]
296 p = revlog.parentrevs(revs[0])[0]
297 revs.insert(0, p)
297 revs.insert(0, p)
298
298
299 # build deltas
299 # build deltas
300 total = len(revs) - 1
300 total = len(revs) - 1
301 msgbundling = _('bundling')
301 msgbundling = _('bundling')
302 for r in xrange(len(revs) - 1):
302 for r in xrange(len(revs) - 1):
303 if units is not None:
303 if units is not None:
304 self._progress(msgbundling, r + 1, unit=units, total=total)
304 self._progress(msgbundling, r + 1, unit=units, total=total)
305 prev, curr = revs[r], revs[r + 1]
305 prev, curr = revs[r], revs[r + 1]
306 linknode = lookup(revlog.node(curr))
306 linknode = lookup(revlog.node(curr))
307 for c in self.revchunk(revlog, curr, prev, linknode):
307 for c in self.revchunk(revlog, curr, prev, linknode):
308 yield c
308 yield c
309
309
310 yield self.close()
310 yield self.close()
311
311
312 # filter any nodes that claim to be part of the known set
312 # filter any nodes that claim to be part of the known set
313 def prune(self, revlog, missing, commonrevs, source):
313 def prune(self, revlog, missing, commonrevs, source):
314 rr, rl = revlog.rev, revlog.linkrev
314 rr, rl = revlog.rev, revlog.linkrev
315 return [n for n in missing if rl(rr(n)) not in commonrevs]
315 return [n for n in missing if rl(rr(n)) not in commonrevs]
316
316
317 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
317 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
318 '''yield a sequence of changegroup chunks (strings)'''
318 '''yield a sequence of changegroup chunks (strings)'''
319 repo = self._repo
319 repo = self._repo
320 cl = self._changelog
320 cl = self._changelog
321 mf = self._manifest
321 mf = self._manifest
322 reorder = self._reorder
322 reorder = self._reorder
323 progress = self._progress
323 progress = self._progress
324
324
325 # for progress output
325 # for progress output
326 msgbundling = _('bundling')
326 msgbundling = _('bundling')
327
327
328 mfs = {} # needed manifests
328 mfs = {} # needed manifests
329 fnodes = {} # needed file nodes
329 fnodes = {} # needed file nodes
330 changedfiles = set()
330 changedfiles = set()
331
331
332 # Callback for the changelog, used to collect changed files and manifest
332 # Callback for the changelog, used to collect changed files and manifest
333 # nodes.
333 # nodes.
334 # Returns the linkrev node (identity in the changelog case).
334 # Returns the linkrev node (identity in the changelog case).
335 def lookupcl(x):
335 def lookupcl(x):
336 c = cl.read(x)
336 c = cl.read(x)
337 changedfiles.update(c[3])
337 changedfiles.update(c[3])
338 # record the first changeset introducing this manifest version
338 # record the first changeset introducing this manifest version
339 mfs.setdefault(c[0], x)
339 mfs.setdefault(c[0], x)
340 return x
340 return x
341
341
342 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
343 reorder=reorder):
344 yield chunk
345 progress(msgbundling, None)
346
342 # Callback for the manifest, used to collect linkrevs for filelog
347 # Callback for the manifest, used to collect linkrevs for filelog
343 # revisions.
348 # revisions.
344 # Returns the linkrev node (collected in lookupcl).
349 # Returns the linkrev node (collected in lookupcl).
345 def lookupmf(x):
350 def lookupmf(x):
346 clnode = mfs[x]
351 clnode = mfs[x]
347 if not fastpathlinkrev:
352 if not fastpathlinkrev:
348 mdata = mf.readfast(x)
353 mdata = mf.readfast(x)
349 for f, n in mdata.iteritems():
354 for f, n in mdata.iteritems():
350 if f in changedfiles:
355 if f in changedfiles:
351 # record the first changeset introducing this filelog
356 # record the first changeset introducing this filelog
352 # version
357 # version
353 fnodes[f].setdefault(n, clnode)
358 fnodes.setdefault(f, {}).setdefault(n, clnode)
354 return clnode
359 return clnode
355
360
356 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
357 reorder=reorder):
358 yield chunk
359 progress(msgbundling, None)
360
361 for f in changedfiles:
362 fnodes[f] = {}
363 mfnodes = self.prune(mf, mfs, commonrevs, source)
361 mfnodes = self.prune(mf, mfs, commonrevs, source)
364 for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
362 for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
365 reorder=reorder):
363 reorder=reorder):
366 yield chunk
364 yield chunk
367 progress(msgbundling, None)
365 progress(msgbundling, None)
368
366
369 mfs.clear()
367 mfs.clear()
370 needed = set(cl.rev(x) for x in clnodes)
368 needed = set(cl.rev(x) for x in clnodes)
371
369
372 def linknodes(filerevlog, fname):
370 def linknodes(filerevlog, fname):
373 if fastpathlinkrev:
371 if fastpathlinkrev:
374 llr = filerevlog.linkrev
372 llr = filerevlog.linkrev
375 def genfilenodes():
373 def genfilenodes():
376 for r in filerevlog:
374 for r in filerevlog:
377 linkrev = llr(r)
375 linkrev = llr(r)
378 if linkrev in needed:
376 if linkrev in needed:
379 yield filerevlog.node(r), cl.node(linkrev)
377 yield filerevlog.node(r), cl.node(linkrev)
380 fnodes[fname] = dict(genfilenodes())
378 return dict(genfilenodes())
381 return fnodes.get(fname, {})
379 return fnodes.get(fname, {})
382
380
383 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
381 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
384 source):
382 source):
385 yield chunk
383 yield chunk
386
384
387 yield self.close()
385 yield self.close()
388 progress(msgbundling, None)
386 progress(msgbundling, None)
389
387
390 if clnodes:
388 if clnodes:
391 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
389 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
392
390
393 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
391 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
394 repo = self._repo
392 repo = self._repo
395 progress = self._progress
393 progress = self._progress
396 reorder = self._reorder
394 reorder = self._reorder
397 msgbundling = _('bundling')
395 msgbundling = _('bundling')
398
396
399 total = len(changedfiles)
397 total = len(changedfiles)
400 # for progress output
398 # for progress output
401 msgfiles = _('files')
399 msgfiles = _('files')
402 for i, fname in enumerate(sorted(changedfiles)):
400 for i, fname in enumerate(sorted(changedfiles)):
403 filerevlog = repo.file(fname)
401 filerevlog = repo.file(fname)
404 if not filerevlog:
402 if not filerevlog:
405 raise util.Abort(_("empty or missing revlog for %s") % fname)
403 raise util.Abort(_("empty or missing revlog for %s") % fname)
406
404
407 linkrevnodes = linknodes(filerevlog, fname)
405 linkrevnodes = linknodes(filerevlog, fname)
408 # Lookup for filenodes, we collected the linkrev nodes above in the
406 # Lookup for filenodes, we collected the linkrev nodes above in the
409 # fastpath case and with lookupmf in the slowpath case.
407 # fastpath case and with lookupmf in the slowpath case.
410 def lookupfilelog(x):
408 def lookupfilelog(x):
411 return linkrevnodes[x]
409 return linkrevnodes[x]
412
410
413 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
411 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
414 if filenodes:
412 if filenodes:
415 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
413 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
416 total=total)
414 total=total)
417 yield self.fileheader(fname)
415 yield self.fileheader(fname)
418 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
416 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
419 reorder=reorder):
417 reorder=reorder):
420 yield chunk
418 yield chunk
421
419
422 def deltaparent(self, revlog, rev, p1, p2, prev):
420 def deltaparent(self, revlog, rev, p1, p2, prev):
423 return prev
421 return prev
424
422
425 def revchunk(self, revlog, rev, prev, linknode):
423 def revchunk(self, revlog, rev, prev, linknode):
426 node = revlog.node(rev)
424 node = revlog.node(rev)
427 p1, p2 = revlog.parentrevs(rev)
425 p1, p2 = revlog.parentrevs(rev)
428 base = self.deltaparent(revlog, rev, p1, p2, prev)
426 base = self.deltaparent(revlog, rev, p1, p2, prev)
429
427
430 prefix = ''
428 prefix = ''
431 if base == nullrev:
429 if base == nullrev:
432 delta = revlog.revision(node)
430 delta = revlog.revision(node)
433 prefix = mdiff.trivialdiffheader(len(delta))
431 prefix = mdiff.trivialdiffheader(len(delta))
434 else:
432 else:
435 delta = revlog.revdiff(base, rev)
433 delta = revlog.revdiff(base, rev)
436 p1n, p2n = revlog.parents(node)
434 p1n, p2n = revlog.parents(node)
437 basenode = revlog.node(base)
435 basenode = revlog.node(base)
438 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
436 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
439 meta += prefix
437 meta += prefix
440 l = len(meta) + len(delta)
438 l = len(meta) + len(delta)
441 yield chunkheader(l)
439 yield chunkheader(l)
442 yield meta
440 yield meta
443 yield delta
441 yield delta
444 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
442 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
445 # do nothing with basenode, it is implicitly the previous one in HG10
443 # do nothing with basenode, it is implicitly the previous one in HG10
446 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
444 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
447
445
448 class cg2packer(cg1packer):
446 class cg2packer(cg1packer):
449
447
450 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
448 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
451
449
452 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
450 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
453 if (revlog._generaldelta and reorder is not True):
451 if (revlog._generaldelta and reorder is not True):
454 reorder = False
452 reorder = False
455 return super(cg2packer, self).group(nodelist, revlog, lookup,
453 return super(cg2packer, self).group(nodelist, revlog, lookup,
456 units=units, reorder=reorder)
454 units=units, reorder=reorder)
457
455
458 def deltaparent(self, revlog, rev, p1, p2, prev):
456 def deltaparent(self, revlog, rev, p1, p2, prev):
459 dp = revlog.deltaparent(rev)
457 dp = revlog.deltaparent(rev)
460 # avoid storing full revisions; pick prev in those cases
458 # avoid storing full revisions; pick prev in those cases
461 # also pick prev when we can't be sure remote has dp
459 # also pick prev when we can't be sure remote has dp
462 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
460 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
463 return prev
461 return prev
464 return dp
462 return dp
465
463
466 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
464 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
467 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
465 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
468
466
469 packermap = {'01': (cg1packer, cg1unpacker),
467 packermap = {'01': (cg1packer, cg1unpacker),
470 '02': (cg2packer, cg2unpacker)}
468 '02': (cg2packer, cg2unpacker)}
471
469
472 def _changegroupinfo(repo, nodes, source):
470 def _changegroupinfo(repo, nodes, source):
473 if repo.ui.verbose or source == 'bundle':
471 if repo.ui.verbose or source == 'bundle':
474 repo.ui.status(_("%d changesets found\n") % len(nodes))
472 repo.ui.status(_("%d changesets found\n") % len(nodes))
475 if repo.ui.debugflag:
473 if repo.ui.debugflag:
476 repo.ui.debug("list of changesets:\n")
474 repo.ui.debug("list of changesets:\n")
477 for node in nodes:
475 for node in nodes:
478 repo.ui.debug("%s\n" % hex(node))
476 repo.ui.debug("%s\n" % hex(node))
479
477
480 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
478 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
481 repo = repo.unfiltered()
479 repo = repo.unfiltered()
482 commonrevs = outgoing.common
480 commonrevs = outgoing.common
483 csets = outgoing.missing
481 csets = outgoing.missing
484 heads = outgoing.missingheads
482 heads = outgoing.missingheads
485 # We go through the fast path if we get told to, or if all (unfiltered
483 # We go through the fast path if we get told to, or if all (unfiltered
486 # heads have been requested (since we then know there all linkrevs will
484 # heads have been requested (since we then know there all linkrevs will
487 # be pulled by the client).
485 # be pulled by the client).
488 heads.sort()
486 heads.sort()
489 fastpathlinkrev = fastpath or (
487 fastpathlinkrev = fastpath or (
490 repo.filtername is None and heads == sorted(repo.heads()))
488 repo.filtername is None and heads == sorted(repo.heads()))
491
489
492 repo.hook('preoutgoing', throw=True, source=source)
490 repo.hook('preoutgoing', throw=True, source=source)
493 _changegroupinfo(repo, csets, source)
491 _changegroupinfo(repo, csets, source)
494 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
492 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
495
493
496 def getsubset(repo, outgoing, bundler, source, fastpath=False):
494 def getsubset(repo, outgoing, bundler, source, fastpath=False):
497 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
495 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
498 return cg1unpacker(util.chunkbuffer(gengroup), 'UN')
496 return cg1unpacker(util.chunkbuffer(gengroup), 'UN')
499
497
500 def changegroupsubset(repo, roots, heads, source):
498 def changegroupsubset(repo, roots, heads, source):
501 """Compute a changegroup consisting of all the nodes that are
499 """Compute a changegroup consisting of all the nodes that are
502 descendants of any of the roots and ancestors of any of the heads.
500 descendants of any of the roots and ancestors of any of the heads.
503 Return a chunkbuffer object whose read() method will return
501 Return a chunkbuffer object whose read() method will return
504 successive changegroup chunks.
502 successive changegroup chunks.
505
503
506 It is fairly complex as determining which filenodes and which
504 It is fairly complex as determining which filenodes and which
507 manifest nodes need to be included for the changeset to be complete
505 manifest nodes need to be included for the changeset to be complete
508 is non-trivial.
506 is non-trivial.
509
507
510 Another wrinkle is doing the reverse, figuring out which changeset in
508 Another wrinkle is doing the reverse, figuring out which changeset in
511 the changegroup a particular filenode or manifestnode belongs to.
509 the changegroup a particular filenode or manifestnode belongs to.
512 """
510 """
513 cl = repo.changelog
511 cl = repo.changelog
514 if not roots:
512 if not roots:
515 roots = [nullid]
513 roots = [nullid]
516 # TODO: remove call to nodesbetween.
514 # TODO: remove call to nodesbetween.
517 csets, roots, heads = cl.nodesbetween(roots, heads)
515 csets, roots, heads = cl.nodesbetween(roots, heads)
518 discbases = []
516 discbases = []
519 for n in roots:
517 for n in roots:
520 discbases.extend([p for p in cl.parents(n) if p != nullid])
518 discbases.extend([p for p in cl.parents(n) if p != nullid])
521 outgoing = discovery.outgoing(cl, discbases, heads)
519 outgoing = discovery.outgoing(cl, discbases, heads)
522 bundler = cg1packer(repo)
520 bundler = cg1packer(repo)
523 return getsubset(repo, outgoing, bundler, source)
521 return getsubset(repo, outgoing, bundler, source)
524
522
525 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
523 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
526 version='01'):
524 version='01'):
527 """Like getbundle, but taking a discovery.outgoing as an argument.
525 """Like getbundle, but taking a discovery.outgoing as an argument.
528
526
529 This is only implemented for local repos and reuses potentially
527 This is only implemented for local repos and reuses potentially
530 precomputed sets in outgoing. Returns a raw changegroup generator."""
528 precomputed sets in outgoing. Returns a raw changegroup generator."""
531 if not outgoing.missing:
529 if not outgoing.missing:
532 return None
530 return None
533 bundler = packermap[version][0](repo, bundlecaps)
531 bundler = packermap[version][0](repo, bundlecaps)
534 return getsubsetraw(repo, outgoing, bundler, source)
532 return getsubsetraw(repo, outgoing, bundler, source)
535
533
536 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
534 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
537 """Like getbundle, but taking a discovery.outgoing as an argument.
535 """Like getbundle, but taking a discovery.outgoing as an argument.
538
536
539 This is only implemented for local repos and reuses potentially
537 This is only implemented for local repos and reuses potentially
540 precomputed sets in outgoing."""
538 precomputed sets in outgoing."""
541 if not outgoing.missing:
539 if not outgoing.missing:
542 return None
540 return None
543 bundler = cg1packer(repo, bundlecaps)
541 bundler = cg1packer(repo, bundlecaps)
544 return getsubset(repo, outgoing, bundler, source)
542 return getsubset(repo, outgoing, bundler, source)
545
543
546 def _computeoutgoing(repo, heads, common):
544 def _computeoutgoing(repo, heads, common):
547 """Computes which revs are outgoing given a set of common
545 """Computes which revs are outgoing given a set of common
548 and a set of heads.
546 and a set of heads.
549
547
550 This is a separate function so extensions can have access to
548 This is a separate function so extensions can have access to
551 the logic.
549 the logic.
552
550
553 Returns a discovery.outgoing object.
551 Returns a discovery.outgoing object.
554 """
552 """
555 cl = repo.changelog
553 cl = repo.changelog
556 if common:
554 if common:
557 hasnode = cl.hasnode
555 hasnode = cl.hasnode
558 common = [n for n in common if hasnode(n)]
556 common = [n for n in common if hasnode(n)]
559 else:
557 else:
560 common = [nullid]
558 common = [nullid]
561 if not heads:
559 if not heads:
562 heads = cl.heads()
560 heads = cl.heads()
563 return discovery.outgoing(cl, common, heads)
561 return discovery.outgoing(cl, common, heads)
564
562
565 def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
563 def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
566 version='01'):
564 version='01'):
567 """Like changegroupsubset, but returns the set difference between the
565 """Like changegroupsubset, but returns the set difference between the
568 ancestors of heads and the ancestors common.
566 ancestors of heads and the ancestors common.
569
567
570 If heads is None, use the local heads. If common is None, use [nullid].
568 If heads is None, use the local heads. If common is None, use [nullid].
571
569
572 If version is None, use a version '1' changegroup.
570 If version is None, use a version '1' changegroup.
573
571
574 The nodes in common might not all be known locally due to the way the
572 The nodes in common might not all be known locally due to the way the
575 current discovery protocol works. Returns a raw changegroup generator.
573 current discovery protocol works. Returns a raw changegroup generator.
576 """
574 """
577 outgoing = _computeoutgoing(repo, heads, common)
575 outgoing = _computeoutgoing(repo, heads, common)
578 return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
576 return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
579 version=version)
577 version=version)
580
578
581 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
579 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
582 """Like changegroupsubset, but returns the set difference between the
580 """Like changegroupsubset, but returns the set difference between the
583 ancestors of heads and the ancestors common.
581 ancestors of heads and the ancestors common.
584
582
585 If heads is None, use the local heads. If common is None, use [nullid].
583 If heads is None, use the local heads. If common is None, use [nullid].
586
584
587 The nodes in common might not all be known locally due to the way the
585 The nodes in common might not all be known locally due to the way the
588 current discovery protocol works.
586 current discovery protocol works.
589 """
587 """
590 outgoing = _computeoutgoing(repo, heads, common)
588 outgoing = _computeoutgoing(repo, heads, common)
591 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
589 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
592
590
593 def changegroup(repo, basenodes, source):
591 def changegroup(repo, basenodes, source):
594 # to avoid a race we use changegroupsubset() (issue1320)
592 # to avoid a race we use changegroupsubset() (issue1320)
595 return changegroupsubset(repo, basenodes, repo.heads(), source)
593 return changegroupsubset(repo, basenodes, repo.heads(), source)
596
594
597 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
595 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
598 revisions = 0
596 revisions = 0
599 files = 0
597 files = 0
600 while True:
598 while True:
601 chunkdata = source.filelogheader()
599 chunkdata = source.filelogheader()
602 if not chunkdata:
600 if not chunkdata:
603 break
601 break
604 f = chunkdata["filename"]
602 f = chunkdata["filename"]
605 repo.ui.debug("adding %s revisions\n" % f)
603 repo.ui.debug("adding %s revisions\n" % f)
606 pr()
604 pr()
607 fl = repo.file(f)
605 fl = repo.file(f)
608 o = len(fl)
606 o = len(fl)
609 if not fl.addgroup(source, revmap, trp):
607 if not fl.addgroup(source, revmap, trp):
610 raise util.Abort(_("received file revlog group is empty"))
608 raise util.Abort(_("received file revlog group is empty"))
611 revisions += len(fl) - o
609 revisions += len(fl) - o
612 files += 1
610 files += 1
613 if f in needfiles:
611 if f in needfiles:
614 needs = needfiles[f]
612 needs = needfiles[f]
615 for new in xrange(o, len(fl)):
613 for new in xrange(o, len(fl)):
616 n = fl.node(new)
614 n = fl.node(new)
617 if n in needs:
615 if n in needs:
618 needs.remove(n)
616 needs.remove(n)
619 else:
617 else:
620 raise util.Abort(
618 raise util.Abort(
621 _("received spurious file revlog entry"))
619 _("received spurious file revlog entry"))
622 if not needs:
620 if not needs:
623 del needfiles[f]
621 del needfiles[f]
624 repo.ui.progress(_('files'), None)
622 repo.ui.progress(_('files'), None)
625
623
626 for f, needs in needfiles.iteritems():
624 for f, needs in needfiles.iteritems():
627 fl = repo.file(f)
625 fl = repo.file(f)
628 for n in needs:
626 for n in needs:
629 try:
627 try:
630 fl.rev(n)
628 fl.rev(n)
631 except error.LookupError:
629 except error.LookupError:
632 raise util.Abort(
630 raise util.Abort(
633 _('missing file data for %s:%s - run hg verify') %
631 _('missing file data for %s:%s - run hg verify') %
634 (f, hex(n)))
632 (f, hex(n)))
635
633
636 return revisions, files
634 return revisions, files
637
635
638 def addchangegroup(repo, source, srctype, url, emptyok=False,
636 def addchangegroup(repo, source, srctype, url, emptyok=False,
639 targetphase=phases.draft):
637 targetphase=phases.draft):
640 """Add the changegroup returned by source.read() to this repo.
638 """Add the changegroup returned by source.read() to this repo.
641 srctype is a string like 'push', 'pull', or 'unbundle'. url is
639 srctype is a string like 'push', 'pull', or 'unbundle'. url is
642 the URL of the repo where this changegroup is coming from.
640 the URL of the repo where this changegroup is coming from.
643
641
644 Return an integer summarizing the change to this repo:
642 Return an integer summarizing the change to this repo:
645 - nothing changed or no source: 0
643 - nothing changed or no source: 0
646 - more heads than before: 1+added heads (2..n)
644 - more heads than before: 1+added heads (2..n)
647 - fewer heads than before: -1-removed heads (-2..-n)
645 - fewer heads than before: -1-removed heads (-2..-n)
648 - number of heads stays the same: 1
646 - number of heads stays the same: 1
649 """
647 """
650 repo = repo.unfiltered()
648 repo = repo.unfiltered()
651 def csmap(x):
649 def csmap(x):
652 repo.ui.debug("add changeset %s\n" % short(x))
650 repo.ui.debug("add changeset %s\n" % short(x))
653 return len(cl)
651 return len(cl)
654
652
655 def revmap(x):
653 def revmap(x):
656 return cl.rev(x)
654 return cl.rev(x)
657
655
658 if not source:
656 if not source:
659 return 0
657 return 0
660
658
661 changesets = files = revisions = 0
659 changesets = files = revisions = 0
662 efiles = set()
660 efiles = set()
663
661
664 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
662 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
665 # The transaction could have been created before and already carries source
663 # The transaction could have been created before and already carries source
666 # information. In this case we use the top level data. We overwrite the
664 # information. In this case we use the top level data. We overwrite the
667 # argument because we need to use the top level value (if they exist) in
665 # argument because we need to use the top level value (if they exist) in
668 # this function.
666 # this function.
669 srctype = tr.hookargs.setdefault('source', srctype)
667 srctype = tr.hookargs.setdefault('source', srctype)
670 url = tr.hookargs.setdefault('url', url)
668 url = tr.hookargs.setdefault('url', url)
671
669
672 # write changelog data to temp files so concurrent readers will not see
670 # write changelog data to temp files so concurrent readers will not see
673 # inconsistent view
671 # inconsistent view
674 cl = repo.changelog
672 cl = repo.changelog
675 cl.delayupdate(tr)
673 cl.delayupdate(tr)
676 oldheads = cl.heads()
674 oldheads = cl.heads()
677 try:
675 try:
678 repo.hook('prechangegroup', throw=True, **tr.hookargs)
676 repo.hook('prechangegroup', throw=True, **tr.hookargs)
679
677
680 trp = weakref.proxy(tr)
678 trp = weakref.proxy(tr)
681 # pull off the changeset group
679 # pull off the changeset group
682 repo.ui.status(_("adding changesets\n"))
680 repo.ui.status(_("adding changesets\n"))
683 clstart = len(cl)
681 clstart = len(cl)
684 class prog(object):
682 class prog(object):
685 step = _('changesets')
683 step = _('changesets')
686 count = 1
684 count = 1
687 ui = repo.ui
685 ui = repo.ui
688 total = None
686 total = None
689 def __call__(repo):
687 def __call__(repo):
690 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
688 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
691 total=repo.total)
689 total=repo.total)
692 repo.count += 1
690 repo.count += 1
693 pr = prog()
691 pr = prog()
694 source.callback = pr
692 source.callback = pr
695
693
696 source.changelogheader()
694 source.changelogheader()
697 srccontent = cl.addgroup(source, csmap, trp)
695 srccontent = cl.addgroup(source, csmap, trp)
698 if not (srccontent or emptyok):
696 if not (srccontent or emptyok):
699 raise util.Abort(_("received changelog group is empty"))
697 raise util.Abort(_("received changelog group is empty"))
700 clend = len(cl)
698 clend = len(cl)
701 changesets = clend - clstart
699 changesets = clend - clstart
702 for c in xrange(clstart, clend):
700 for c in xrange(clstart, clend):
703 efiles.update(repo[c].files())
701 efiles.update(repo[c].files())
704 efiles = len(efiles)
702 efiles = len(efiles)
705 repo.ui.progress(_('changesets'), None)
703 repo.ui.progress(_('changesets'), None)
706
704
707 # pull off the manifest group
705 # pull off the manifest group
708 repo.ui.status(_("adding manifests\n"))
706 repo.ui.status(_("adding manifests\n"))
709 pr.step = _('manifests')
707 pr.step = _('manifests')
710 pr.count = 1
708 pr.count = 1
711 pr.total = changesets # manifests <= changesets
709 pr.total = changesets # manifests <= changesets
712 # no need to check for empty manifest group here:
710 # no need to check for empty manifest group here:
713 # if the result of the merge of 1 and 2 is the same in 3 and 4,
711 # if the result of the merge of 1 and 2 is the same in 3 and 4,
714 # no new manifest will be created and the manifest group will
712 # no new manifest will be created and the manifest group will
715 # be empty during the pull
713 # be empty during the pull
716 source.manifestheader()
714 source.manifestheader()
717 repo.manifest.addgroup(source, revmap, trp)
715 repo.manifest.addgroup(source, revmap, trp)
718 repo.ui.progress(_('manifests'), None)
716 repo.ui.progress(_('manifests'), None)
719
717
720 needfiles = {}
718 needfiles = {}
721 if repo.ui.configbool('server', 'validate', default=False):
719 if repo.ui.configbool('server', 'validate', default=False):
722 # validate incoming csets have their manifests
720 # validate incoming csets have their manifests
723 for cset in xrange(clstart, clend):
721 for cset in xrange(clstart, clend):
724 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
722 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
725 mfest = repo.manifest.readdelta(mfest)
723 mfest = repo.manifest.readdelta(mfest)
726 # store file nodes we must see
724 # store file nodes we must see
727 for f, n in mfest.iteritems():
725 for f, n in mfest.iteritems():
728 needfiles.setdefault(f, set()).add(n)
726 needfiles.setdefault(f, set()).add(n)
729
727
730 # process the files
728 # process the files
731 repo.ui.status(_("adding file changes\n"))
729 repo.ui.status(_("adding file changes\n"))
732 pr.step = _('files')
730 pr.step = _('files')
733 pr.count = 1
731 pr.count = 1
734 pr.total = efiles
732 pr.total = efiles
735 source.callback = None
733 source.callback = None
736
734
737 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
735 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
738 needfiles)
736 needfiles)
739 revisions += newrevs
737 revisions += newrevs
740 files += newfiles
738 files += newfiles
741
739
742 dh = 0
740 dh = 0
743 if oldheads:
741 if oldheads:
744 heads = cl.heads()
742 heads = cl.heads()
745 dh = len(heads) - len(oldheads)
743 dh = len(heads) - len(oldheads)
746 for h in heads:
744 for h in heads:
747 if h not in oldheads and repo[h].closesbranch():
745 if h not in oldheads and repo[h].closesbranch():
748 dh -= 1
746 dh -= 1
749 htext = ""
747 htext = ""
750 if dh:
748 if dh:
751 htext = _(" (%+d heads)") % dh
749 htext = _(" (%+d heads)") % dh
752
750
753 repo.ui.status(_("added %d changesets"
751 repo.ui.status(_("added %d changesets"
754 " with %d changes to %d files%s\n")
752 " with %d changes to %d files%s\n")
755 % (changesets, revisions, files, htext))
753 % (changesets, revisions, files, htext))
756 repo.invalidatevolatilesets()
754 repo.invalidatevolatilesets()
757
755
758 if changesets > 0:
756 if changesets > 0:
759 p = lambda: tr.writepending() and repo.root or ""
757 p = lambda: tr.writepending() and repo.root or ""
760 if 'node' not in tr.hookargs:
758 if 'node' not in tr.hookargs:
761 tr.hookargs['node'] = hex(cl.node(clstart))
759 tr.hookargs['node'] = hex(cl.node(clstart))
762 hookargs = dict(tr.hookargs)
760 hookargs = dict(tr.hookargs)
763 else:
761 else:
764 hookargs = dict(tr.hookargs)
762 hookargs = dict(tr.hookargs)
765 hookargs['node'] = hex(cl.node(clstart))
763 hookargs['node'] = hex(cl.node(clstart))
766 repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)
764 repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)
767
765
768 added = [cl.node(r) for r in xrange(clstart, clend)]
766 added = [cl.node(r) for r in xrange(clstart, clend)]
769 publishing = repo.ui.configbool('phases', 'publish', True)
767 publishing = repo.ui.configbool('phases', 'publish', True)
770 if srctype in ('push', 'serve'):
768 if srctype in ('push', 'serve'):
771 # Old servers can not push the boundary themselves.
769 # Old servers can not push the boundary themselves.
772 # New servers won't push the boundary if changeset already
770 # New servers won't push the boundary if changeset already
773 # exists locally as secret
771 # exists locally as secret
774 #
772 #
775 # We should not use added here but the list of all change in
773 # We should not use added here but the list of all change in
776 # the bundle
774 # the bundle
777 if publishing:
775 if publishing:
778 phases.advanceboundary(repo, tr, phases.public, srccontent)
776 phases.advanceboundary(repo, tr, phases.public, srccontent)
779 else:
777 else:
780 # Those changesets have been pushed from the outside, their
778 # Those changesets have been pushed from the outside, their
781 # phases are going to be pushed alongside. Therefor
779 # phases are going to be pushed alongside. Therefor
782 # `targetphase` is ignored.
780 # `targetphase` is ignored.
783 phases.advanceboundary(repo, tr, phases.draft, srccontent)
781 phases.advanceboundary(repo, tr, phases.draft, srccontent)
784 phases.retractboundary(repo, tr, phases.draft, added)
782 phases.retractboundary(repo, tr, phases.draft, added)
785 elif srctype != 'strip':
783 elif srctype != 'strip':
786 # publishing only alter behavior during push
784 # publishing only alter behavior during push
787 #
785 #
788 # strip should not touch boundary at all
786 # strip should not touch boundary at all
789 phases.retractboundary(repo, tr, targetphase, added)
787 phases.retractboundary(repo, tr, targetphase, added)
790
788
791 if changesets > 0:
789 if changesets > 0:
792 if srctype != 'strip':
790 if srctype != 'strip':
793 # During strip, branchcache is invalid but coming call to
791 # During strip, branchcache is invalid but coming call to
794 # `destroyed` will repair it.
792 # `destroyed` will repair it.
795 # In other case we can safely update cache on disk.
793 # In other case we can safely update cache on disk.
796 branchmap.updatecache(repo.filtered('served'))
794 branchmap.updatecache(repo.filtered('served'))
797
795
798 def runhooks():
796 def runhooks():
799 # These hooks run when the lock releases, not when the
797 # These hooks run when the lock releases, not when the
800 # transaction closes. So it's possible for the changelog
798 # transaction closes. So it's possible for the changelog
801 # to have changed since we last saw it.
799 # to have changed since we last saw it.
802 if clstart >= len(repo):
800 if clstart >= len(repo):
803 return
801 return
804
802
805 # forcefully update the on-disk branch cache
803 # forcefully update the on-disk branch cache
806 repo.ui.debug("updating the branch cache\n")
804 repo.ui.debug("updating the branch cache\n")
807 repo.hook("changegroup", **hookargs)
805 repo.hook("changegroup", **hookargs)
808
806
809 for n in added:
807 for n in added:
810 args = hookargs.copy()
808 args = hookargs.copy()
811 args['node'] = hex(n)
809 args['node'] = hex(n)
812 repo.hook("incoming", **args)
810 repo.hook("incoming", **args)
813
811
814 newheads = [h for h in repo.heads() if h not in oldheads]
812 newheads = [h for h in repo.heads() if h not in oldheads]
815 repo.ui.log("incoming",
813 repo.ui.log("incoming",
816 "%s incoming changes - new heads: %s\n",
814 "%s incoming changes - new heads: %s\n",
817 len(added),
815 len(added),
818 ', '.join([hex(c[:6]) for c in newheads]))
816 ', '.join([hex(c[:6]) for c in newheads]))
819
817
820 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
818 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
821 lambda: repo._afterlock(runhooks))
819 lambda: repo._afterlock(runhooks))
822
820
823 tr.close()
821 tr.close()
824
822
825 finally:
823 finally:
826 tr.release()
824 tr.release()
827 # never return 0 here:
825 # never return 0 here:
828 if dh < 0:
826 if dh < 0:
829 return dh - 1
827 return dh - 1
830 else:
828 else:
831 return dh + 1
829 return dh + 1
@@ -1,1107 +1,1107 b''
1 #
1 #
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os, mimetypes, re, cgi, copy
8 import os, mimetypes, re, cgi, copy
9 import webutil
9 import webutil
10 from mercurial import error, encoding, archival, templater, templatefilters
10 from mercurial import error, encoding, archival, templater, templatefilters
11 from mercurial.node import short, hex
11 from mercurial.node import short, hex
12 from mercurial import util
12 from mercurial import util
13 from common import paritygen, staticfile, get_contact, ErrorResponse
13 from common import paritygen, staticfile, get_contact, ErrorResponse
14 from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
14 from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
15 from mercurial import graphmod, patch
15 from mercurial import graphmod, patch
16 from mercurial import help as helpmod
16 from mercurial import help as helpmod
17 from mercurial import scmutil
17 from mercurial import scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19 from mercurial.error import ParseError, RepoLookupError, Abort
19 from mercurial.error import ParseError, RepoLookupError, Abort
20 from mercurial import revset
20 from mercurial import revset
21
21
22 # __all__ is populated with the allowed commands. Be sure to add to it if
22 # __all__ is populated with the allowed commands. Be sure to add to it if
23 # you're adding a new command, or the new command won't work.
23 # you're adding a new command, or the new command won't work.
24
24
25 __all__ = [
25 __all__ = [
26 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev',
26 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev',
27 'manifest', 'tags', 'bookmarks', 'branches', 'summary', 'filediff', 'diff',
27 'manifest', 'tags', 'bookmarks', 'branches', 'summary', 'filediff', 'diff',
28 'comparison', 'annotate', 'filelog', 'archive', 'static', 'graph', 'help',
28 'comparison', 'annotate', 'filelog', 'archive', 'static', 'graph', 'help',
29 ]
29 ]
30
30
31 def log(web, req, tmpl):
31 def log(web, req, tmpl):
32 if 'file' in req.form and req.form['file'][0]:
32 if 'file' in req.form and req.form['file'][0]:
33 return filelog(web, req, tmpl)
33 return filelog(web, req, tmpl)
34 else:
34 else:
35 return changelog(web, req, tmpl)
35 return changelog(web, req, tmpl)
36
36
37 def rawfile(web, req, tmpl):
37 def rawfile(web, req, tmpl):
38 guessmime = web.configbool('web', 'guessmime', False)
38 guessmime = web.configbool('web', 'guessmime', False)
39
39
40 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
40 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
41 if not path:
41 if not path:
42 content = manifest(web, req, tmpl)
42 content = manifest(web, req, tmpl)
43 req.respond(HTTP_OK, web.ctype)
43 req.respond(HTTP_OK, web.ctype)
44 return content
44 return content
45
45
46 try:
46 try:
47 fctx = webutil.filectx(web.repo, req)
47 fctx = webutil.filectx(web.repo, req)
48 except error.LookupError, inst:
48 except error.LookupError, inst:
49 try:
49 try:
50 content = manifest(web, req, tmpl)
50 content = manifest(web, req, tmpl)
51 req.respond(HTTP_OK, web.ctype)
51 req.respond(HTTP_OK, web.ctype)
52 return content
52 return content
53 except ErrorResponse:
53 except ErrorResponse:
54 raise inst
54 raise inst
55
55
56 path = fctx.path()
56 path = fctx.path()
57 text = fctx.data()
57 text = fctx.data()
58 mt = 'application/binary'
58 mt = 'application/binary'
59 if guessmime:
59 if guessmime:
60 mt = mimetypes.guess_type(path)[0]
60 mt = mimetypes.guess_type(path)[0]
61 if mt is None:
61 if mt is None:
62 mt = util.binary(text) and 'application/binary' or 'text/plain'
62 mt = util.binary(text) and 'application/binary' or 'text/plain'
63 if mt.startswith('text/'):
63 if mt.startswith('text/'):
64 mt += '; charset="%s"' % encoding.encoding
64 mt += '; charset="%s"' % encoding.encoding
65
65
66 req.respond(HTTP_OK, mt, path, body=text)
66 req.respond(HTTP_OK, mt, path, body=text)
67 return []
67 return []
68
68
69 def _filerevision(web, tmpl, fctx):
69 def _filerevision(web, tmpl, fctx):
70 f = fctx.path()
70 f = fctx.path()
71 text = fctx.data()
71 text = fctx.data()
72 parity = paritygen(web.stripecount)
72 parity = paritygen(web.stripecount)
73
73
74 if util.binary(text):
74 if util.binary(text):
75 mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
75 mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
76 text = '(binary:%s)' % mt
76 text = '(binary:%s)' % mt
77
77
78 def lines():
78 def lines():
79 for lineno, t in enumerate(text.splitlines(True)):
79 for lineno, t in enumerate(text.splitlines(True)):
80 yield {"line": t,
80 yield {"line": t,
81 "lineid": "l%d" % (lineno + 1),
81 "lineid": "l%d" % (lineno + 1),
82 "linenumber": "% 6d" % (lineno + 1),
82 "linenumber": "% 6d" % (lineno + 1),
83 "parity": parity.next()}
83 "parity": parity.next()}
84
84
85 return tmpl("filerevision",
85 return tmpl("filerevision",
86 file=f,
86 file=f,
87 path=webutil.up(f),
87 path=webutil.up(f),
88 text=lines(),
88 text=lines(),
89 rev=fctx.rev(),
89 rev=fctx.rev(),
90 node=fctx.hex(),
90 node=fctx.hex(),
91 author=fctx.user(),
91 author=fctx.user(),
92 date=fctx.date(),
92 date=fctx.date(),
93 desc=fctx.description(),
93 desc=fctx.description(),
94 extra=fctx.extra(),
94 extra=fctx.extra(),
95 branch=webutil.nodebranchnodefault(fctx),
95 branch=webutil.nodebranchnodefault(fctx),
96 parent=webutil.parents(fctx),
96 parent=webutil.parents(fctx),
97 child=webutil.children(fctx),
97 child=webutil.children(fctx),
98 rename=webutil.renamelink(fctx),
98 rename=webutil.renamelink(fctx),
99 permissions=fctx.manifest().flags(f))
99 permissions=fctx.manifest().flags(f))
100
100
101 def file(web, req, tmpl):
101 def file(web, req, tmpl):
102 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
102 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
103 if not path:
103 if not path:
104 return manifest(web, req, tmpl)
104 return manifest(web, req, tmpl)
105 try:
105 try:
106 return _filerevision(web, tmpl, webutil.filectx(web.repo, req))
106 return _filerevision(web, tmpl, webutil.filectx(web.repo, req))
107 except error.LookupError, inst:
107 except error.LookupError, inst:
108 try:
108 try:
109 return manifest(web, req, tmpl)
109 return manifest(web, req, tmpl)
110 except ErrorResponse:
110 except ErrorResponse:
111 raise inst
111 raise inst
112
112
113 def _search(web, req, tmpl):
113 def _search(web, req, tmpl):
114 MODE_REVISION = 'rev'
114 MODE_REVISION = 'rev'
115 MODE_KEYWORD = 'keyword'
115 MODE_KEYWORD = 'keyword'
116 MODE_REVSET = 'revset'
116 MODE_REVSET = 'revset'
117
117
118 def revsearch(ctx):
118 def revsearch(ctx):
119 yield ctx
119 yield ctx
120
120
121 def keywordsearch(query):
121 def keywordsearch(query):
122 lower = encoding.lower
122 lower = encoding.lower
123 qw = lower(query).split()
123 qw = lower(query).split()
124
124
125 def revgen():
125 def revgen():
126 cl = web.repo.changelog
126 cl = web.repo.changelog
127 for i in xrange(len(web.repo) - 1, 0, -100):
127 for i in xrange(len(web.repo) - 1, 0, -100):
128 l = []
128 l = []
129 for j in cl.revs(max(0, i - 99), i):
129 for j in cl.revs(max(0, i - 99), i):
130 ctx = web.repo[j]
130 ctx = web.repo[j]
131 l.append(ctx)
131 l.append(ctx)
132 l.reverse()
132 l.reverse()
133 for e in l:
133 for e in l:
134 yield e
134 yield e
135
135
136 for ctx in revgen():
136 for ctx in revgen():
137 miss = 0
137 miss = 0
138 for q in qw:
138 for q in qw:
139 if not (q in lower(ctx.user()) or
139 if not (q in lower(ctx.user()) or
140 q in lower(ctx.description()) or
140 q in lower(ctx.description()) or
141 q in lower(" ".join(ctx.files()))):
141 q in lower(" ".join(ctx.files()))):
142 miss = 1
142 miss = 1
143 break
143 break
144 if miss:
144 if miss:
145 continue
145 continue
146
146
147 yield ctx
147 yield ctx
148
148
149 def revsetsearch(revs):
149 def revsetsearch(revs):
150 for r in revs:
150 for r in revs:
151 yield web.repo[r]
151 yield web.repo[r]
152
152
153 searchfuncs = {
153 searchfuncs = {
154 MODE_REVISION: (revsearch, 'exact revision search'),
154 MODE_REVISION: (revsearch, 'exact revision search'),
155 MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
155 MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
156 MODE_REVSET: (revsetsearch, 'revset expression search'),
156 MODE_REVSET: (revsetsearch, 'revset expression search'),
157 }
157 }
158
158
159 def getsearchmode(query):
159 def getsearchmode(query):
160 try:
160 try:
161 ctx = web.repo[query]
161 ctx = web.repo[query]
162 except (error.RepoError, error.LookupError):
162 except (error.RepoError, error.LookupError):
163 # query is not an exact revision pointer, need to
163 # query is not an exact revision pointer, need to
164 # decide if it's a revset expression or keywords
164 # decide if it's a revset expression or keywords
165 pass
165 pass
166 else:
166 else:
167 return MODE_REVISION, ctx
167 return MODE_REVISION, ctx
168
168
169 revdef = 'reverse(%s)' % query
169 revdef = 'reverse(%s)' % query
170 try:
170 try:
171 tree, pos = revset.parse(revdef)
171 tree, pos = revset.parse(revdef)
172 except ParseError:
172 except ParseError:
173 # can't parse to a revset tree
173 # can't parse to a revset tree
174 return MODE_KEYWORD, query
174 return MODE_KEYWORD, query
175
175
176 if revset.depth(tree) <= 2:
176 if revset.depth(tree) <= 2:
177 # no revset syntax used
177 # no revset syntax used
178 return MODE_KEYWORD, query
178 return MODE_KEYWORD, query
179
179
180 if util.any((token, (value or '')[:3]) == ('string', 're:')
180 if util.any((token, (value or '')[:3]) == ('string', 're:')
181 for token, value, pos in revset.tokenize(revdef)):
181 for token, value, pos in revset.tokenize(revdef)):
182 return MODE_KEYWORD, query
182 return MODE_KEYWORD, query
183
183
184 funcsused = revset.funcsused(tree)
184 funcsused = revset.funcsused(tree)
185 if not funcsused.issubset(revset.safesymbols):
185 if not funcsused.issubset(revset.safesymbols):
186 return MODE_KEYWORD, query
186 return MODE_KEYWORD, query
187
187
188 mfunc = revset.match(web.repo.ui, revdef)
188 mfunc = revset.match(web.repo.ui, revdef)
189 try:
189 try:
190 revs = mfunc(web.repo, revset.baseset(web.repo))
190 revs = mfunc(web.repo, revset.baseset(web.repo))
191 return MODE_REVSET, revs
191 return MODE_REVSET, revs
192 # ParseError: wrongly placed tokens, wrongs arguments, etc
192 # ParseError: wrongly placed tokens, wrongs arguments, etc
193 # RepoLookupError: no such revision, e.g. in 'revision:'
193 # RepoLookupError: no such revision, e.g. in 'revision:'
194 # Abort: bookmark/tag not exists
194 # Abort: bookmark/tag not exists
195 # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
195 # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
196 except (ParseError, RepoLookupError, Abort, LookupError):
196 except (ParseError, RepoLookupError, Abort, LookupError):
197 return MODE_KEYWORD, query
197 return MODE_KEYWORD, query
198
198
199 def changelist(**map):
199 def changelist(**map):
200 count = 0
200 count = 0
201
201
202 for ctx in searchfunc[0](funcarg):
202 for ctx in searchfunc[0](funcarg):
203 count += 1
203 count += 1
204 n = ctx.node()
204 n = ctx.node()
205 showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
205 showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
206 files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
206 files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
207
207
208 yield tmpl('searchentry',
208 yield tmpl('searchentry',
209 parity=parity.next(),
209 parity=parity.next(),
210 author=ctx.user(),
210 author=ctx.user(),
211 parent=webutil.parents(ctx),
211 parent=webutil.parents(ctx),
212 child=webutil.children(ctx),
212 child=webutil.children(ctx),
213 changelogtag=showtags,
213 changelogtag=showtags,
214 desc=ctx.description(),
214 desc=ctx.description(),
215 extra=ctx.extra(),
215 extra=ctx.extra(),
216 date=ctx.date(),
216 date=ctx.date(),
217 files=files,
217 files=files,
218 rev=ctx.rev(),
218 rev=ctx.rev(),
219 node=hex(n),
219 node=hex(n),
220 tags=webutil.nodetagsdict(web.repo, n),
220 tags=webutil.nodetagsdict(web.repo, n),
221 bookmarks=webutil.nodebookmarksdict(web.repo, n),
221 bookmarks=webutil.nodebookmarksdict(web.repo, n),
222 inbranch=webutil.nodeinbranch(web.repo, ctx),
222 inbranch=webutil.nodeinbranch(web.repo, ctx),
223 branches=webutil.nodebranchdict(web.repo, ctx))
223 branches=webutil.nodebranchdict(web.repo, ctx))
224
224
225 if count >= revcount:
225 if count >= revcount:
226 break
226 break
227
227
228 query = req.form['rev'][0]
228 query = req.form['rev'][0]
229 revcount = web.maxchanges
229 revcount = web.maxchanges
230 if 'revcount' in req.form:
230 if 'revcount' in req.form:
231 try:
231 try:
232 revcount = int(req.form.get('revcount', [revcount])[0])
232 revcount = int(req.form.get('revcount', [revcount])[0])
233 revcount = max(revcount, 1)
233 revcount = max(revcount, 1)
234 tmpl.defaults['sessionvars']['revcount'] = revcount
234 tmpl.defaults['sessionvars']['revcount'] = revcount
235 except ValueError:
235 except ValueError:
236 pass
236 pass
237
237
238 lessvars = copy.copy(tmpl.defaults['sessionvars'])
238 lessvars = copy.copy(tmpl.defaults['sessionvars'])
239 lessvars['revcount'] = max(revcount / 2, 1)
239 lessvars['revcount'] = max(revcount / 2, 1)
240 lessvars['rev'] = query
240 lessvars['rev'] = query
241 morevars = copy.copy(tmpl.defaults['sessionvars'])
241 morevars = copy.copy(tmpl.defaults['sessionvars'])
242 morevars['revcount'] = revcount * 2
242 morevars['revcount'] = revcount * 2
243 morevars['rev'] = query
243 morevars['rev'] = query
244
244
245 mode, funcarg = getsearchmode(query)
245 mode, funcarg = getsearchmode(query)
246
246
247 if 'forcekw' in req.form:
247 if 'forcekw' in req.form:
248 showforcekw = ''
248 showforcekw = ''
249 showunforcekw = searchfuncs[mode][1]
249 showunforcekw = searchfuncs[mode][1]
250 mode = MODE_KEYWORD
250 mode = MODE_KEYWORD
251 funcarg = query
251 funcarg = query
252 else:
252 else:
253 if mode != MODE_KEYWORD:
253 if mode != MODE_KEYWORD:
254 showforcekw = searchfuncs[MODE_KEYWORD][1]
254 showforcekw = searchfuncs[MODE_KEYWORD][1]
255 else:
255 else:
256 showforcekw = ''
256 showforcekw = ''
257 showunforcekw = ''
257 showunforcekw = ''
258
258
259 searchfunc = searchfuncs[mode]
259 searchfunc = searchfuncs[mode]
260
260
261 tip = web.repo['tip']
261 tip = web.repo['tip']
262 parity = paritygen(web.stripecount)
262 parity = paritygen(web.stripecount)
263
263
264 return tmpl('search', query=query, node=tip.hex(),
264 return tmpl('search', query=query, node=tip.hex(),
265 entries=changelist, archives=web.archivelist("tip"),
265 entries=changelist, archives=web.archivelist("tip"),
266 morevars=morevars, lessvars=lessvars,
266 morevars=morevars, lessvars=lessvars,
267 modedesc=searchfunc[1],
267 modedesc=searchfunc[1],
268 showforcekw=showforcekw, showunforcekw=showunforcekw)
268 showforcekw=showforcekw, showunforcekw=showunforcekw)
269
269
270 def changelog(web, req, tmpl, shortlog=False):
270 def changelog(web, req, tmpl, shortlog=False):
271
271
272 query = ''
272 query = ''
273 if 'node' in req.form:
273 if 'node' in req.form:
274 ctx = webutil.changectx(web.repo, req)
274 ctx = webutil.changectx(web.repo, req)
275 elif 'rev' in req.form:
275 elif 'rev' in req.form:
276 return _search(web, req, tmpl)
276 return _search(web, req, tmpl)
277 else:
277 else:
278 ctx = web.repo['tip']
278 ctx = web.repo['tip']
279
279
280 def changelist():
280 def changelist():
281 revs = []
281 revs = []
282 if pos != -1:
282 if pos != -1:
283 revs = web.repo.changelog.revs(pos, 0)
283 revs = web.repo.changelog.revs(pos, 0)
284 curcount = 0
284 curcount = 0
285 for i in revs:
285 for i in revs:
286 ctx = web.repo[i]
286 ctx = web.repo[i]
287 n = ctx.node()
287 n = ctx.node()
288 showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
288 showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
289 files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
289 files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
290
290
291 curcount += 1
291 curcount += 1
292 if curcount > revcount + 1:
292 if curcount > revcount + 1:
293 break
293 break
294 yield {"parity": parity.next(),
294 yield {"parity": parity.next(),
295 "author": ctx.user(),
295 "author": ctx.user(),
296 "parent": webutil.parents(ctx, i - 1),
296 "parent": webutil.parents(ctx, i - 1),
297 "child": webutil.children(ctx, i + 1),
297 "child": webutil.children(ctx, i + 1),
298 "changelogtag": showtags,
298 "changelogtag": showtags,
299 "desc": ctx.description(),
299 "desc": ctx.description(),
300 "extra": ctx.extra(),
300 "extra": ctx.extra(),
301 "date": ctx.date(),
301 "date": ctx.date(),
302 "files": files,
302 "files": files,
303 "rev": i,
303 "rev": i,
304 "node": hex(n),
304 "node": hex(n),
305 "tags": webutil.nodetagsdict(web.repo, n),
305 "tags": webutil.nodetagsdict(web.repo, n),
306 "bookmarks": webutil.nodebookmarksdict(web.repo, n),
306 "bookmarks": webutil.nodebookmarksdict(web.repo, n),
307 "inbranch": webutil.nodeinbranch(web.repo, ctx),
307 "inbranch": webutil.nodeinbranch(web.repo, ctx),
308 "branches": webutil.nodebranchdict(web.repo, ctx)
308 "branches": webutil.nodebranchdict(web.repo, ctx)
309 }
309 }
310
310
311 revcount = shortlog and web.maxshortchanges or web.maxchanges
311 revcount = shortlog and web.maxshortchanges or web.maxchanges
312 if 'revcount' in req.form:
312 if 'revcount' in req.form:
313 try:
313 try:
314 revcount = int(req.form.get('revcount', [revcount])[0])
314 revcount = int(req.form.get('revcount', [revcount])[0])
315 revcount = max(revcount, 1)
315 revcount = max(revcount, 1)
316 tmpl.defaults['sessionvars']['revcount'] = revcount
316 tmpl.defaults['sessionvars']['revcount'] = revcount
317 except ValueError:
317 except ValueError:
318 pass
318 pass
319
319
320 lessvars = copy.copy(tmpl.defaults['sessionvars'])
320 lessvars = copy.copy(tmpl.defaults['sessionvars'])
321 lessvars['revcount'] = max(revcount / 2, 1)
321 lessvars['revcount'] = max(revcount / 2, 1)
322 morevars = copy.copy(tmpl.defaults['sessionvars'])
322 morevars = copy.copy(tmpl.defaults['sessionvars'])
323 morevars['revcount'] = revcount * 2
323 morevars['revcount'] = revcount * 2
324
324
325 count = len(web.repo)
325 count = len(web.repo)
326 pos = ctx.rev()
326 pos = ctx.rev()
327 parity = paritygen(web.stripecount)
327 parity = paritygen(web.stripecount)
328
328
329 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
329 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
330
330
331 entries = list(changelist())
331 entries = list(changelist())
332 latestentry = entries[:1]
332 latestentry = entries[:1]
333 if len(entries) > revcount:
333 if len(entries) > revcount:
334 nextentry = entries[-1:]
334 nextentry = entries[-1:]
335 entries = entries[:-1]
335 entries = entries[:-1]
336 else:
336 else:
337 nextentry = []
337 nextentry = []
338
338
339 return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav,
339 return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav,
340 node=ctx.hex(), rev=pos, changesets=count,
340 node=ctx.hex(), rev=pos, changesets=count,
341 entries=entries,
341 entries=entries,
342 latestentry=latestentry, nextentry=nextentry,
342 latestentry=latestentry, nextentry=nextentry,
343 archives=web.archivelist("tip"), revcount=revcount,
343 archives=web.archivelist("tip"), revcount=revcount,
344 morevars=morevars, lessvars=lessvars, query=query)
344 morevars=morevars, lessvars=lessvars, query=query)
345
345
def shortlog(web, req, tmpl):
    """Render the compact changelog view (changelog with shortlog=True)."""
    return changelog(web, req, tmpl, shortlog=True)
348
348
def changeset(web, req, tmpl):
    """Render a single changeset: metadata, changed-file list, diffs and
    diffstat.

    Also exposed under the alias ``rev`` (assignment below).
    """
    ctx = webutil.changectx(web.repo, req)
    basectx = webutil.basechangectx(web.repo, req)
    if basectx is None:
        # default comparison base is the first parent
        basectx = ctx.p1()
    showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node())
    showbookmarks = webutil.showbookmark(web.repo, tmpl, 'changesetbookmark',
                                         ctx.node())
    showbranch = webutil.nodebranchnodefault(ctx)

    files = []
    parity = paritygen(web.stripecount)
    for blockno, f in enumerate(ctx.files()):
        # a file listed by the changeset may no longer exist in its
        # manifest (removed files): only link those still present
        template = f in ctx and 'filenodelink' or 'filenolink'
        files.append(tmpl(template,
                          node=ctx.hex(), file=f, blockno=blockno + 1,
                          parity=parity.next()))

    style = web.config('web', 'style', 'paper')
    if 'style' in req.form:
        # allow the request to override the configured style
        style = req.form['style'][0]

    parity = paritygen(web.stripecount)
    diffs = webutil.diffs(web.repo, tmpl, ctx, basectx, None, parity, style)

    # fresh parity generator so the diffstat striping starts over
    parity = paritygen(web.stripecount)
    diffstatgen = webutil.diffstatgen(ctx, basectx)
    diffstat = webutil.diffstat(tmpl, ctx, diffstatgen, parity)

    return tmpl('changeset',
                diff=diffs,
                rev=ctx.rev(),
                node=ctx.hex(),
                parent=webutil.parents(ctx),
                child=webutil.children(ctx),
                basenode=basectx.hex(),
                changesettag=showtags,
                changesetbookmark=showbookmarks,
                changesetbranch=showbranch,
                author=ctx.user(),
                desc=ctx.description(),
                extra=ctx.extra(),
                date=ctx.date(),
                files=files,
                diffsummary=lambda **x: webutil.diffsummary(diffstatgen),
                diffstat=diffstat,
                archives=web.archivelist(ctx.hex()),
                tags=webutil.nodetagsdict(web.repo, ctx.node()),
                bookmarks=webutil.nodebookmarksdict(web.repo, ctx.node()),
                branch=webutil.nodebranchnodefault(ctx),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx))

rev = changeset
403
403
def decodepath(path):
    """Hook mapping a repository path to a working-copy path.

    The default implementation is the identity. Extensions (e.g.,
    largefiles) can override this to remap files in the virtual file
    system presented by the manifest command below."""
    return path
411
411
def manifest(web, req, tmpl):
    """Render a directory listing of the manifest at the requested path.

    Files directly under ``path`` are collected in ``files``; deeper
    entries are folded into the nested ``dirs`` tree so each
    subdirectory appears once.
    """
    ctx = webutil.changectx(web.repo, req)
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    mf = ctx.manifest()
    node = ctx.node()

    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    # normalize to a trailing-slash directory prefix
    if path and path[-1] != "/":
        path += "/"
    l = len(path)
    abspath = "/" + path

    for full, n in mf.iteritems():
        # the virtual path (working copy path) used for the full
        # (repository) path
        f = decodepath(full)

        if f[:l] != path:
            continue
        remain = f[l:]
        elements = remain.split('/')
        if len(elements) == 1:
            # a file directly in this directory
            files[remain] = full
        else:
            h = dirs # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                if len(h) > 1:
                    # directory already has multiple children; no need
                    # to descend further for collapsing purposes
                    break
            h[None] = None # denotes files present

    if mf and not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(**map):
        # one entry per file directly under the requested path
        for f in sorted(files):
            full = files[f]

            fctx = ctx.filectx(full)
            yield {"file": full,
                   "parity": parity.next(),
                   "basename": f,
                   "date": fctx.date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full)}

    def dirlist(**map):
        # one entry per subdirectory; chains of single-child directories
        # (a/b/c) are collapsed into a single row via 'emptydirs'
        for d in sorted(dirs):

            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                # Python 2 dict.items() returns a list, so [0] is valid
                k, v = h.items()[0]
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {"parity": parity.next(),
                   "path": path,
                   "emptydirs": "/".join(emptydirs),
                   "basename": d}

    return tmpl("manifest",
                rev=ctx.rev(),
                node=hex(node),
                path=abspath,
                up=webutil.up(abspath),
                upparity=parity.next(),
                fentries=filelist,
                dentries=dirlist,
                archives=web.archivelist(hex(node)),
                tags=webutil.nodetagsdict(web.repo, node),
                bookmarks=webutil.nodebookmarksdict(web.repo, node),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx))
493
493
def tags(web, req, tmpl):
    """Render the repository's tags, newest first."""
    taglist = list(reversed(web.repo.tagslist()))
    parity = paritygen(web.stripecount)

    def entries(notip, latestonly, **map):
        # optionally drop the synthetic "tip" tag, optionally keep only
        # the most recent entry
        if notip:
            selected = [(name, node) for name, node in taglist
                        if name != "tip"]
        else:
            selected = taglist
        if latestonly:
            selected = selected[:1]
        for name, node in selected:
            yield {"parity": parity.next(),
                   "tag": name,
                   "date": web.repo[node].date(),
                   "node": hex(node)}

    return tmpl("tags",
                node=hex(web.repo.changelog.tip()),
                entries=lambda **x: entries(False, False, **x),
                entriesnotip=lambda **x: entries(True, False, **x),
                latestentry=lambda **x: entries(True, True, **x))
515
515
def bookmarks(web, req, tmpl):
    """Render the repository's bookmarks.

    Only bookmarks whose target changeset is present in the repository
    are listed (filters out bookmarks pointing at filtered changesets).
    """
    i = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
    parity = paritygen(web.stripecount)

    def entries(latestonly, **map):
        if latestonly:
            # guard the empty case: min() on an empty sequence raises
            # ValueError, which would break rendering for repositories
            # without bookmarks
            t = i and [min(i)] or []
        else:
            t = sorted(i)
        for k, n in t:
            yield {"parity": parity.next(),
                   "bookmark": k,
                   "date": web.repo[n].date(),
                   "node": hex(n)}

    return tmpl("bookmarks",
                node=hex(web.repo.changelog.tip()),
                entries=lambda **x: entries(latestonly=False, **x),
                latestentry=lambda **x: entries(latestonly=True, **x))
535
535
def branches(web, req, tmpl):
    """Render the branch list with each branch's status.

    Status is 'closed' for closed branches, 'inactive' when the branch
    tip is not a repository head, and 'open' otherwise.
    """
    tips = []
    heads = web.repo.heads()
    parity = paritygen(web.stripecount)
    # sort key: open branches before closed ones, then by revision
    sortkey = lambda item: (not item[1], item[0].rev())

    def entries(limit, **map):
        count = 0
        # lazily populate the shared 'tips' cache on first call so both
        # generators below reuse the same branchmap scan
        if not tips:
            for tag, hs, tip, closed in web.repo.branchmap().iterbranches():
                tips.append((web.repo[tip], closed))
        for ctx, closed in sorted(tips, key=sortkey, reverse=True):
            # limit == 0 means unlimited
            if limit > 0 and count >= limit:
                return
            count += 1
            if closed:
                status = 'closed'
            elif ctx.node() not in heads:
                # tip is not a head: the branch has been merged into
                # another or otherwise superseded
                status = 'inactive'
            else:
                status = 'open'
            yield {'parity': parity.next(),
                   'branch': ctx.branch(),
                   'status': status,
                   'node': ctx.hex(),
                   'date': ctx.date()}

    return tmpl('branches', node=hex(web.repo.changelog.tip()),
                entries=lambda **x: entries(0, **x),
                latestentry=lambda **x: entries(1, **x))
566
566
def summary(web, req, tmpl):
    """Render the repository summary page: recent tags, bookmarks,
    branches and a short changelog window, plus repository metadata.
    """
    i = reversed(web.repo.tagslist())

    def tagentries(**map):
        parity = paritygen(web.stripecount)
        count = 0
        for k, n in i:
            if k == "tip": # skip tip
                continue

            count += 1
            if count > 10: # limit to 10 tags
                break

            yield tmpl("tagentry",
                       parity=parity.next(),
                       tag=k,
                       node=hex(n),
                       date=web.repo[n].date())

    def bookmarks(**map):
        parity = paritygen(web.stripecount)
        # only bookmarks whose target changeset is present in the repo
        marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
        for k, n in sorted(marks)[:10]: # limit to 10 bookmarks
            yield {'parity': parity.next(),
                   'bookmark': k,
                   'date': web.repo[n].date(),
                   'node': hex(n)}

    def branches(**map):
        parity = paritygen(web.stripecount)

        b = web.repo.branchmap()
        # negated rev as primary key sorts branch tips newest first
        l = [(-web.repo.changelog.rev(tip), tip, tag)
             for tag, heads, tip, closed in b.iterbranches()]
        for r, n, t in sorted(l):
            yield {'parity': parity.next(),
                   'branch': t,
                   'node': hex(n),
                   'date': web.repo[n].date()}

    def changelist(**map):
        # NOTE: reads 'start' and 'end' computed below; the closure is
        # only invoked at template-render time, after they are bound
        parity = paritygen(web.stripecount, offset=start - end)
        l = [] # build a list in forward order for efficiency
        revs = []
        if start < end:
            revs = web.repo.changelog.revs(start, end - 1)
        for i in revs:
            ctx = web.repo[i]
            n = ctx.node()
            hn = hex(n)

            l.append(tmpl(
                'shortlogentry',
                parity=parity.next(),
                author=ctx.user(),
                desc=ctx.description(),
                extra=ctx.extra(),
                date=ctx.date(),
                rev=i,
                node=hn,
                tags=webutil.nodetagsdict(web.repo, n),
                bookmarks=webutil.nodebookmarksdict(web.repo, n),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx)))

        # present newest first
        l.reverse()
        yield l

    tip = web.repo['tip']
    count = len(web.repo)
    # window covering the most recent maxchanges revisions
    start = max(0, count - web.maxchanges)
    end = min(count, start + web.maxchanges)

    return tmpl("summary",
                desc=web.config("web", "description", "unknown"),
                owner=get_contact(web.config) or "unknown",
                lastchange=tip.date(),
                tags=tagentries,
                bookmarks=bookmarks,
                branches=branches,
                shortlog=changelist,
                node=tip.hex(),
                archives=web.archivelist("tip"))
651
651
def filediff(web, req, tmpl):
    """Render the diff a changeset introduces to a single file.

    Also exposed under the alias ``diff`` (assignment below).
    """
    fctx, ctx = None, None
    try:
        fctx = webutil.filectx(web.repo, req)
    except LookupError:
        # the file has no filectx at this revision (e.g. it was
        # removed); still render a diff if the changeset touched it,
        # otherwise re-raise the lookup failure
        ctx = webutil.changectx(web.repo, req)
        path = webutil.cleanpath(web.repo, req.form['file'][0])
        if path not in ctx.files():
            raise

    if fctx is not None:
        n = fctx.node()
        path = fctx.path()
        ctx = fctx.changectx()
    else:
        n = ctx.node()
        # path already defined in except clause

    parity = paritygen(web.stripecount)
    style = web.config('web', 'style', 'paper')
    if 'style' in req.form:
        # allow the request to override the configured style
        style = req.form['style'][0]

    diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style)
    # no rename link is available when the file context could not be built
    rename = fctx and webutil.renamelink(fctx) or []
    ctx = fctx and fctx or ctx
    return tmpl("filediff",
                file=path,
                node=hex(n),
                rev=ctx.rev(),
                date=ctx.date(),
                desc=ctx.description(),
                extra=ctx.extra(),
                author=ctx.user(),
                rename=rename,
                branch=webutil.nodebranchnodefault(ctx),
                parent=webutil.parents(ctx),
                child=webutil.children(ctx),
                diff=diffs)

diff = filediff
693
693
def comparison(web, req, tmpl):
    """Render a side-by-side comparison of one file against its first
    parent revision.
    """
    ctx = webutil.changectx(web.repo, req)
    if 'file' not in req.form:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = webutil.cleanpath(web.repo, req.form['file'][0])
    rename = path in ctx and webutil.renamelink(ctx[path]) or []

    # 'full' requests unlimited context (-1); otherwise a line count
    parsecontext = lambda v: v == 'full' and -1 or int(v)
    if 'context' in req.form:
        context = parsecontext(req.form['context'][0])
    else:
        context = parsecontext(web.config('web', 'comparisoncontext', '5'))

    def filelines(f):
        if util.binary(f.data()):
            # binary content: substitute a one-line placeholder
            mt = mimetypes.guess_type(f.path())[0]
            if not mt:
                mt = 'application/octet-stream'
            return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
        return f.data().splitlines()

    parent = ctx.p1()
    leftrev = parent.rev()
    leftnode = parent.node()
    rightrev = ctx.rev()
    rightnode = ctx.node()
    if path in ctx:
        fctx = ctx[path]
        rightlines = filelines(fctx)
        if path not in parent:
            # file was added in this changeset: nothing on the left
            leftlines = ()
        else:
            pfctx = parent[path]
            leftlines = filelines(pfctx)
    else:
        # file was removed in this changeset: nothing on the right
        rightlines = ()
        fctx = ctx.parents()[0][path]
        leftlines = filelines(fctx)

    comparison = webutil.compare(tmpl, context, leftlines, rightlines)
    return tmpl('filecomparison',
                file=path,
                node=hex(ctx.node()),
                rev=ctx.rev(),
                date=ctx.date(),
                desc=ctx.description(),
                extra=ctx.extra(),
                author=ctx.user(),
                rename=rename,
                branch=webutil.nodebranchnodefault(ctx),
                parent=webutil.parents(fctx),
                child=webutil.children(fctx),
                leftrev=leftrev,
                leftnode=hex(leftnode),
                rightrev=rightrev,
                rightnode=hex(rightnode),
                comparison=comparison)
751
751
def annotate(web, req, tmpl):
    """Render per-line annotation (blame) for a file revision."""
    fctx = webutil.filectx(web.repo, req)
    f = fctx.path()
    parity = paritygen(web.stripecount)
    diffopts = patch.diffopts(web.repo.ui, untrusted=True, section='annotate')

    def annotate(**map):
        last = None
        if util.binary(fctx.data()):
            # binary file: emit a single placeholder "line" attributed
            # to the current file revision
            mt = (mimetypes.guess_type(fctx.path())[0]
                  or 'application/octet-stream')
            lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
                                '(binary:%s)' % mt)])
        else:
            lines = enumerate(fctx.annotate(follow=True, linenumber=True,
                                            diffopts=diffopts))
        for lineno, ((f, targetline), l) in lines:
            fnode = f.filenode()

            # NOTE(review): 'last' tracks the originating filenode of
            # the previous line but is not otherwise consumed here
            if last != fnode:
                last = fnode

            yield {"parity": parity.next(),
                   "node": f.hex(),
                   "rev": f.rev(),
                   "author": f.user(),
                   "desc": f.description(),
                   "extra": f.extra(),
                   "file": f.path(),
                   "targetline": targetline,
                   "line": l,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1),
                   "revdate": f.date()}

    return tmpl("fileannotate",
                file=f,
                annotate=annotate,
                path=webutil.up(f),
                rev=fctx.rev(),
                node=fctx.hex(),
                author=fctx.user(),
                date=fctx.date(),
                desc=fctx.description(),
                extra=fctx.extra(),
                rename=webutil.renamelink(fctx),
                branch=webutil.nodebranchnodefault(fctx),
                parent=webutil.parents(fctx),
                child=webutil.children(fctx),
                permissions=fctx.manifest().flags(f))
802
802
def filelog(web, req, tmpl):
    """Render the revision history of a single file, paged by revcount."""

    try:
        fctx = webutil.filectx(web.repo, req)
        f = fctx.path()
        fl = fctx.filelog()
    except error.LookupError:
        # file is absent at the requested revision; fall back to the
        # newest filelog revision linked at or before that changeset
        f = webutil.cleanpath(web.repo, req.form['file'][0])
        fl = web.repo.file(f)
        numrevs = len(fl)
        if not numrevs: # file doesn't exist at all
            raise
        rev = webutil.changectx(web.repo, req).rev()
        first = fl.linkrev(0)
        if rev < first: # current rev is from before file existed
            raise
        frev = numrevs - 1
        while fl.linkrev(frev) > rev:
            frev -= 1
        fctx = web.repo.filectx(f, fl.linkrev(frev))

    revcount = web.maxshortchanges
    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            # persist the chosen page size in session template vars
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            # ignore a malformed revcount and keep the default
            pass

    # session-variable dicts for the "less" / "more" paging links
    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = fctx.filerev() + 1
    start = max(0, fctx.filerev() - revcount + 1) # first rev on this page
    end = min(count, start + revcount) # last rev on this page
    parity = paritygen(web.stripecount, offset=start - end)

    def entries():
        l = []

        repo = web.repo
        revs = fctx.filelog().revs(start, end - 1)
        for i in revs:
            iterfctx = fctx.filectx(i)

            l.append({"parity": parity.next(),
                      "filerev": i,
                      "file": f,
                      "node": iterfctx.hex(),
                      "author": iterfctx.user(),
                      "date": iterfctx.date(),
                      "rename": webutil.renamelink(iterfctx),
                      "parent": webutil.parents(iterfctx),
                      "child": webutil.children(iterfctx),
                      "desc": iterfctx.description(),
                      "extra": iterfctx.extra(),
                      "tags": webutil.nodetagsdict(repo, iterfctx.node()),
                      "bookmarks": webutil.nodebookmarksdict(
                          repo, iterfctx.node()),
                      "branch": webutil.nodebranchnodefault(iterfctx),
                      "inbranch": webutil.nodeinbranch(repo, iterfctx),
                      "branches": webutil.nodebranchdict(repo, iterfctx)})
        # built oldest-first for efficiency; yield newest-first
        for e in reversed(l):
            yield e

    entries = list(entries())
    latestentry = entries[:1]

    revnav = webutil.filerevnav(web.repo, fctx.path())
    nav = revnav.gen(end - 1, revcount, count)
    return tmpl("filelog", file=f, node=fctx.hex(), nav=nav,
                entries=entries,
                latestentry=latestentry,
                revcount=revcount, morevars=morevars, lessvars=lessvars)
880
880
881 def archive(web, req, tmpl):
881 def archive(web, req, tmpl):
882 type_ = req.form.get('type', [None])[0]
882 type_ = req.form.get('type', [None])[0]
883 allowed = web.configlist("web", "allow_archive")
883 allowed = web.configlist("web", "allow_archive")
884 key = req.form['node'][0]
884 key = req.form['node'][0]
885
885
886 if type_ not in web.archives:
886 if type_ not in web.archives:
887 msg = 'Unsupported archive type: %s' % type_
887 msg = 'Unsupported archive type: %s' % type_
888 raise ErrorResponse(HTTP_NOT_FOUND, msg)
888 raise ErrorResponse(HTTP_NOT_FOUND, msg)
889
889
890 if not ((type_ in allowed or
890 if not ((type_ in allowed or
891 web.configbool("web", "allow" + type_, False))):
891 web.configbool("web", "allow" + type_, False))):
892 msg = 'Archive type not allowed: %s' % type_
892 msg = 'Archive type not allowed: %s' % type_
893 raise ErrorResponse(HTTP_FORBIDDEN, msg)
893 raise ErrorResponse(HTTP_FORBIDDEN, msg)
894
894
895 reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
895 reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
896 cnode = web.repo.lookup(key)
896 cnode = web.repo.lookup(key)
897 arch_version = key
897 arch_version = key
898 if cnode == key or key == 'tip':
898 if cnode == key or key == 'tip':
899 arch_version = short(cnode)
899 arch_version = short(cnode)
900 name = "%s-%s" % (reponame, arch_version)
900 name = "%s-%s" % (reponame, arch_version)
901
901
902 ctx = webutil.changectx(web.repo, req)
902 ctx = webutil.changectx(web.repo, req)
903 pats = []
903 pats = []
904 matchfn = None
904 matchfn = scmutil.match(ctx, [])
905 file = req.form.get('file', None)
905 file = req.form.get('file', None)
906 if file:
906 if file:
907 pats = ['path:' + file[0]]
907 pats = ['path:' + file[0]]
908 matchfn = scmutil.match(ctx, pats, default='path')
908 matchfn = scmutil.match(ctx, pats, default='path')
909 if pats:
909 if pats:
910 files = [f for f in ctx.manifest().keys() if matchfn(f)]
910 files = [f for f in ctx.manifest().keys() if matchfn(f)]
911 if not files:
911 if not files:
912 raise ErrorResponse(HTTP_NOT_FOUND,
912 raise ErrorResponse(HTTP_NOT_FOUND,
913 'file(s) not found: %s' % file[0])
913 'file(s) not found: %s' % file[0])
914
914
915 mimetype, artype, extension, encoding = web.archive_specs[type_]
915 mimetype, artype, extension, encoding = web.archive_specs[type_]
916 headers = [
916 headers = [
917 ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
917 ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
918 ]
918 ]
919 if encoding:
919 if encoding:
920 headers.append(('Content-Encoding', encoding))
920 headers.append(('Content-Encoding', encoding))
921 req.headers.extend(headers)
921 req.headers.extend(headers)
922 req.respond(HTTP_OK, mimetype)
922 req.respond(HTTP_OK, mimetype)
923
923
924 archival.archive(web.repo, req, cnode, artype, prefix=name,
924 archival.archive(web.repo, req, cnode, artype, prefix=name,
925 matchfn=matchfn,
925 matchfn=matchfn,
926 subrepos=web.configbool("web", "archivesubrepos"))
926 subrepos=web.configbool("web", "archivesubrepos"))
927 return []
927 return []
928
928
929
929
930 def static(web, req, tmpl):
930 def static(web, req, tmpl):
931 fname = req.form['file'][0]
931 fname = req.form['file'][0]
932 # a repo owner may set web.static in .hg/hgrc to get any file
932 # a repo owner may set web.static in .hg/hgrc to get any file
933 # readable by the user running the CGI script
933 # readable by the user running the CGI script
934 static = web.config("web", "static", None, untrusted=False)
934 static = web.config("web", "static", None, untrusted=False)
935 if not static:
935 if not static:
936 tp = web.templatepath or templater.templatepaths()
936 tp = web.templatepath or templater.templatepaths()
937 if isinstance(tp, str):
937 if isinstance(tp, str):
938 tp = [tp]
938 tp = [tp]
939 static = [os.path.join(p, 'static') for p in tp]
939 static = [os.path.join(p, 'static') for p in tp]
940 staticfile(static, fname, req)
940 staticfile(static, fname, req)
941 return []
941 return []
942
942
943 def graph(web, req, tmpl):
943 def graph(web, req, tmpl):
944
944
945 ctx = webutil.changectx(web.repo, req)
945 ctx = webutil.changectx(web.repo, req)
946 rev = ctx.rev()
946 rev = ctx.rev()
947
947
948 bg_height = 39
948 bg_height = 39
949 revcount = web.maxshortchanges
949 revcount = web.maxshortchanges
950 if 'revcount' in req.form:
950 if 'revcount' in req.form:
951 try:
951 try:
952 revcount = int(req.form.get('revcount', [revcount])[0])
952 revcount = int(req.form.get('revcount', [revcount])[0])
953 revcount = max(revcount, 1)
953 revcount = max(revcount, 1)
954 tmpl.defaults['sessionvars']['revcount'] = revcount
954 tmpl.defaults['sessionvars']['revcount'] = revcount
955 except ValueError:
955 except ValueError:
956 pass
956 pass
957
957
958 lessvars = copy.copy(tmpl.defaults['sessionvars'])
958 lessvars = copy.copy(tmpl.defaults['sessionvars'])
959 lessvars['revcount'] = max(revcount / 2, 1)
959 lessvars['revcount'] = max(revcount / 2, 1)
960 morevars = copy.copy(tmpl.defaults['sessionvars'])
960 morevars = copy.copy(tmpl.defaults['sessionvars'])
961 morevars['revcount'] = revcount * 2
961 morevars['revcount'] = revcount * 2
962
962
963 count = len(web.repo)
963 count = len(web.repo)
964 pos = rev
964 pos = rev
965
965
966 uprev = min(max(0, count - 1), rev + revcount)
966 uprev = min(max(0, count - 1), rev + revcount)
967 downrev = max(0, rev - revcount)
967 downrev = max(0, rev - revcount)
968 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
968 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
969
969
970 tree = []
970 tree = []
971 if pos != -1:
971 if pos != -1:
972 allrevs = web.repo.changelog.revs(pos, 0)
972 allrevs = web.repo.changelog.revs(pos, 0)
973 revs = []
973 revs = []
974 for i in allrevs:
974 for i in allrevs:
975 revs.append(i)
975 revs.append(i)
976 if len(revs) >= revcount:
976 if len(revs) >= revcount:
977 break
977 break
978
978
979 # We have to feed a baseset to dagwalker as it is expecting smartset
979 # We have to feed a baseset to dagwalker as it is expecting smartset
980 # object. This does not have a big impact on hgweb performance itself
980 # object. This does not have a big impact on hgweb performance itself
981 # since hgweb graphing code is not itself lazy yet.
981 # since hgweb graphing code is not itself lazy yet.
982 dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
982 dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
983 # As we said one line above... not lazy.
983 # As we said one line above... not lazy.
984 tree = list(graphmod.colored(dag, web.repo))
984 tree = list(graphmod.colored(dag, web.repo))
985
985
986 def getcolumns(tree):
986 def getcolumns(tree):
987 cols = 0
987 cols = 0
988 for (id, type, ctx, vtx, edges) in tree:
988 for (id, type, ctx, vtx, edges) in tree:
989 if type != graphmod.CHANGESET:
989 if type != graphmod.CHANGESET:
990 continue
990 continue
991 cols = max(cols, max([edge[0] for edge in edges] or [0]),
991 cols = max(cols, max([edge[0] for edge in edges] or [0]),
992 max([edge[1] for edge in edges] or [0]))
992 max([edge[1] for edge in edges] or [0]))
993 return cols
993 return cols
994
994
995 def graphdata(usetuples, **map):
995 def graphdata(usetuples, **map):
996 data = []
996 data = []
997
997
998 row = 0
998 row = 0
999 for (id, type, ctx, vtx, edges) in tree:
999 for (id, type, ctx, vtx, edges) in tree:
1000 if type != graphmod.CHANGESET:
1000 if type != graphmod.CHANGESET:
1001 continue
1001 continue
1002 node = str(ctx)
1002 node = str(ctx)
1003 age = templatefilters.age(ctx.date())
1003 age = templatefilters.age(ctx.date())
1004 desc = templatefilters.firstline(ctx.description())
1004 desc = templatefilters.firstline(ctx.description())
1005 desc = cgi.escape(templatefilters.nonempty(desc))
1005 desc = cgi.escape(templatefilters.nonempty(desc))
1006 user = cgi.escape(templatefilters.person(ctx.user()))
1006 user = cgi.escape(templatefilters.person(ctx.user()))
1007 branch = cgi.escape(ctx.branch())
1007 branch = cgi.escape(ctx.branch())
1008 try:
1008 try:
1009 branchnode = web.repo.branchtip(branch)
1009 branchnode = web.repo.branchtip(branch)
1010 except error.RepoLookupError:
1010 except error.RepoLookupError:
1011 branchnode = None
1011 branchnode = None
1012 branch = branch, branchnode == ctx.node()
1012 branch = branch, branchnode == ctx.node()
1013
1013
1014 if usetuples:
1014 if usetuples:
1015 data.append((node, vtx, edges, desc, user, age, branch,
1015 data.append((node, vtx, edges, desc, user, age, branch,
1016 [cgi.escape(x) for x in ctx.tags()],
1016 [cgi.escape(x) for x in ctx.tags()],
1017 [cgi.escape(x) for x in ctx.bookmarks()]))
1017 [cgi.escape(x) for x in ctx.bookmarks()]))
1018 else:
1018 else:
1019 edgedata = [{'col': edge[0], 'nextcol': edge[1],
1019 edgedata = [{'col': edge[0], 'nextcol': edge[1],
1020 'color': (edge[2] - 1) % 6 + 1,
1020 'color': (edge[2] - 1) % 6 + 1,
1021 'width': edge[3], 'bcolor': edge[4]}
1021 'width': edge[3], 'bcolor': edge[4]}
1022 for edge in edges]
1022 for edge in edges]
1023
1023
1024 data.append(
1024 data.append(
1025 {'node': node,
1025 {'node': node,
1026 'col': vtx[0],
1026 'col': vtx[0],
1027 'color': (vtx[1] - 1) % 6 + 1,
1027 'color': (vtx[1] - 1) % 6 + 1,
1028 'edges': edgedata,
1028 'edges': edgedata,
1029 'row': row,
1029 'row': row,
1030 'nextrow': row + 1,
1030 'nextrow': row + 1,
1031 'desc': desc,
1031 'desc': desc,
1032 'user': user,
1032 'user': user,
1033 'age': age,
1033 'age': age,
1034 'bookmarks': webutil.nodebookmarksdict(
1034 'bookmarks': webutil.nodebookmarksdict(
1035 web.repo, ctx.node()),
1035 web.repo, ctx.node()),
1036 'branches': webutil.nodebranchdict(web.repo, ctx),
1036 'branches': webutil.nodebranchdict(web.repo, ctx),
1037 'inbranch': webutil.nodeinbranch(web.repo, ctx),
1037 'inbranch': webutil.nodeinbranch(web.repo, ctx),
1038 'tags': webutil.nodetagsdict(web.repo, ctx.node())})
1038 'tags': webutil.nodetagsdict(web.repo, ctx.node())})
1039
1039
1040 row += 1
1040 row += 1
1041
1041
1042 return data
1042 return data
1043
1043
1044 cols = getcolumns(tree)
1044 cols = getcolumns(tree)
1045 rows = len(tree)
1045 rows = len(tree)
1046 canvasheight = (rows + 1) * bg_height - 27
1046 canvasheight = (rows + 1) * bg_height - 27
1047
1047
1048 return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
1048 return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
1049 lessvars=lessvars, morevars=morevars, downrev=downrev,
1049 lessvars=lessvars, morevars=morevars, downrev=downrev,
1050 cols=cols, rows=rows,
1050 cols=cols, rows=rows,
1051 canvaswidth=(cols + 1) * bg_height,
1051 canvaswidth=(cols + 1) * bg_height,
1052 truecanvasheight=rows * bg_height,
1052 truecanvasheight=rows * bg_height,
1053 canvasheight=canvasheight, bg_height=bg_height,
1053 canvasheight=canvasheight, bg_height=bg_height,
1054 jsdata=lambda **x: graphdata(True, **x),
1054 jsdata=lambda **x: graphdata(True, **x),
1055 nodes=lambda **x: graphdata(False, **x),
1055 nodes=lambda **x: graphdata(False, **x),
1056 node=ctx.hex(), changenav=changenav)
1056 node=ctx.hex(), changenav=changenav)
1057
1057
1058 def _getdoc(e):
1058 def _getdoc(e):
1059 doc = e[0].__doc__
1059 doc = e[0].__doc__
1060 if doc:
1060 if doc:
1061 doc = _(doc).split('\n')[0]
1061 doc = _(doc).split('\n')[0]
1062 else:
1062 else:
1063 doc = _('(no help text available)')
1063 doc = _('(no help text available)')
1064 return doc
1064 return doc
1065
1065
1066 def help(web, req, tmpl):
1066 def help(web, req, tmpl):
1067 from mercurial import commands # avoid cycle
1067 from mercurial import commands # avoid cycle
1068
1068
1069 topicname = req.form.get('node', [None])[0]
1069 topicname = req.form.get('node', [None])[0]
1070 if not topicname:
1070 if not topicname:
1071 def topics(**map):
1071 def topics(**map):
1072 for entries, summary, _doc in helpmod.helptable:
1072 for entries, summary, _doc in helpmod.helptable:
1073 yield {'topic': entries[0], 'summary': summary}
1073 yield {'topic': entries[0], 'summary': summary}
1074
1074
1075 early, other = [], []
1075 early, other = [], []
1076 primary = lambda s: s.split('|')[0]
1076 primary = lambda s: s.split('|')[0]
1077 for c, e in commands.table.iteritems():
1077 for c, e in commands.table.iteritems():
1078 doc = _getdoc(e)
1078 doc = _getdoc(e)
1079 if 'DEPRECATED' in doc or c.startswith('debug'):
1079 if 'DEPRECATED' in doc or c.startswith('debug'):
1080 continue
1080 continue
1081 cmd = primary(c)
1081 cmd = primary(c)
1082 if cmd.startswith('^'):
1082 if cmd.startswith('^'):
1083 early.append((cmd[1:], doc))
1083 early.append((cmd[1:], doc))
1084 else:
1084 else:
1085 other.append((cmd, doc))
1085 other.append((cmd, doc))
1086
1086
1087 early.sort()
1087 early.sort()
1088 other.sort()
1088 other.sort()
1089
1089
1090 def earlycommands(**map):
1090 def earlycommands(**map):
1091 for c, doc in early:
1091 for c, doc in early:
1092 yield {'topic': c, 'summary': doc}
1092 yield {'topic': c, 'summary': doc}
1093
1093
1094 def othercommands(**map):
1094 def othercommands(**map):
1095 for c, doc in other:
1095 for c, doc in other:
1096 yield {'topic': c, 'summary': doc}
1096 yield {'topic': c, 'summary': doc}
1097
1097
1098 return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
1098 return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
1099 othercommands=othercommands, title='Index')
1099 othercommands=othercommands, title='Index')
1100
1100
1101 u = webutil.wsgiui()
1101 u = webutil.wsgiui()
1102 u.verbose = True
1102 u.verbose = True
1103 try:
1103 try:
1104 doc = helpmod.help_(u, topicname)
1104 doc = helpmod.help_(u, topicname)
1105 except error.UnknownCommand:
1105 except error.UnknownCommand:
1106 raise ErrorResponse(HTTP_NOT_FOUND)
1106 raise ErrorResponse(HTTP_NOT_FOUND)
1107 return tmpl('help', topic=topicname, doc=doc)
1107 return tmpl('help', topic=topicname, doc=doc)
@@ -1,330 +1,331 b''
1 # mail.py - mail sending bits for mercurial
1 # mail.py - mail sending bits for mercurial
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import util, encoding, sslutil
9 import util, encoding, sslutil
10 import os, smtplib, socket, quopri, time, sys
10 import os, smtplib, socket, quopri, time, sys
11 import email
11 import email
12 # On python2.4 you have to import these by name or they fail to
12 # On python2.4 you have to import these by name or they fail to
13 # load. This was not a problem on Python 2.7.
13 # load. This was not a problem on Python 2.7.
14 import email.Header
14 import email.Header
15 import email.MIMEText
15 import email.MIMEText
16
16
17 _oldheaderinit = email.Header.Header.__init__
17 _oldheaderinit = email.Header.Header.__init__
18 def _unifiedheaderinit(self, *args, **kw):
18 def _unifiedheaderinit(self, *args, **kw):
19 """
19 """
20 Python 2.7 introduces a backwards incompatible change
20 Python 2.7 introduces a backwards incompatible change
21 (Python issue1974, r70772) in email.Generator.Generator code:
21 (Python issue1974, r70772) in email.Generator.Generator code:
22 pre-2.7 code passed "continuation_ws='\t'" to the Header
22 pre-2.7 code passed "continuation_ws='\t'" to the Header
23 constructor, and 2.7 removed this parameter.
23 constructor, and 2.7 removed this parameter.
24
24
25 Default argument is continuation_ws=' ', which means that the
25 Default argument is continuation_ws=' ', which means that the
26 behaviour is different in <2.7 and 2.7
26 behaviour is different in <2.7 and 2.7
27
27
28 We consider the 2.7 behaviour to be preferable, but need
28 We consider the 2.7 behaviour to be preferable, but need
29 to have an unified behaviour for versions 2.4 to 2.7
29 to have an unified behaviour for versions 2.4 to 2.7
30 """
30 """
31 # override continuation_ws
31 # override continuation_ws
32 kw['continuation_ws'] = ' '
32 kw['continuation_ws'] = ' '
33 _oldheaderinit(self, *args, **kw)
33 _oldheaderinit(self, *args, **kw)
34
34
35 email.Header.Header.__dict__['__init__'] = _unifiedheaderinit
35 email.Header.Header.__dict__['__init__'] = _unifiedheaderinit
36
36
37 class STARTTLS(smtplib.SMTP):
37 class STARTTLS(smtplib.SMTP):
38 '''Derived class to verify the peer certificate for STARTTLS.
38 '''Derived class to verify the peer certificate for STARTTLS.
39
39
40 This class allows to pass any keyword arguments to SSL socket creation.
40 This class allows to pass any keyword arguments to SSL socket creation.
41 '''
41 '''
42 def __init__(self, sslkwargs, **kwargs):
42 def __init__(self, sslkwargs, **kwargs):
43 smtplib.SMTP.__init__(self, **kwargs)
43 smtplib.SMTP.__init__(self, **kwargs)
44 self._sslkwargs = sslkwargs
44 self._sslkwargs = sslkwargs
45
45
46 def starttls(self, keyfile=None, certfile=None):
46 def starttls(self, keyfile=None, certfile=None):
47 if not self.has_extn("starttls"):
47 if not self.has_extn("starttls"):
48 msg = "STARTTLS extension not supported by server"
48 msg = "STARTTLS extension not supported by server"
49 raise smtplib.SMTPException(msg)
49 raise smtplib.SMTPException(msg)
50 (resp, reply) = self.docmd("STARTTLS")
50 (resp, reply) = self.docmd("STARTTLS")
51 if resp == 220:
51 if resp == 220:
52 self.sock = sslutil.ssl_wrap_socket(self.sock, keyfile, certfile,
52 self.sock = sslutil.ssl_wrap_socket(self.sock, keyfile, certfile,
53 **self._sslkwargs)
53 **self._sslkwargs)
54 if not util.safehasattr(self.sock, "read"):
54 if not util.safehasattr(self.sock, "read"):
55 # using httplib.FakeSocket with Python 2.5.x or earlier
55 # using httplib.FakeSocket with Python 2.5.x or earlier
56 self.sock.read = self.sock.recv
56 self.sock.read = self.sock.recv
57 self.file = smtplib.SSLFakeFile(self.sock)
57 self.file = smtplib.SSLFakeFile(self.sock)
58 self.helo_resp = None
58 self.helo_resp = None
59 self.ehlo_resp = None
59 self.ehlo_resp = None
60 self.esmtp_features = {}
60 self.esmtp_features = {}
61 self.does_esmtp = 0
61 self.does_esmtp = 0
62 return (resp, reply)
62 return (resp, reply)
63
63
64 if util.safehasattr(smtplib.SMTP, '_get_socket'):
64 if util.safehasattr(smtplib.SMTP, '_get_socket'):
65 class SMTPS(smtplib.SMTP):
65 class SMTPS(smtplib.SMTP):
66 '''Derived class to verify the peer certificate for SMTPS.
66 '''Derived class to verify the peer certificate for SMTPS.
67
67
68 This class allows to pass any keyword arguments to SSL socket creation.
68 This class allows to pass any keyword arguments to SSL socket creation.
69 '''
69 '''
70 def __init__(self, sslkwargs, keyfile=None, certfile=None, **kwargs):
70 def __init__(self, sslkwargs, keyfile=None, certfile=None, **kwargs):
71 self.keyfile = keyfile
71 self.keyfile = keyfile
72 self.certfile = certfile
72 self.certfile = certfile
73 smtplib.SMTP.__init__(self, **kwargs)
73 smtplib.SMTP.__init__(self, **kwargs)
74 self.default_port = smtplib.SMTP_SSL_PORT
74 self.default_port = smtplib.SMTP_SSL_PORT
75 self._sslkwargs = sslkwargs
75 self._sslkwargs = sslkwargs
76
76
77 def _get_socket(self, host, port, timeout):
77 def _get_socket(self, host, port, timeout):
78 if self.debuglevel > 0:
78 if self.debuglevel > 0:
79 print >> sys.stderr, 'connect:', (host, port)
79 print >> sys.stderr, 'connect:', (host, port)
80 new_socket = socket.create_connection((host, port), timeout)
80 new_socket = socket.create_connection((host, port), timeout)
81 new_socket = sslutil.ssl_wrap_socket(new_socket,
81 new_socket = sslutil.ssl_wrap_socket(new_socket,
82 self.keyfile, self.certfile,
82 self.keyfile, self.certfile,
83 **self._sslkwargs)
83 **self._sslkwargs)
84 self.file = smtplib.SSLFakeFile(new_socket)
84 self.file = smtplib.SSLFakeFile(new_socket)
85 return new_socket
85 return new_socket
86 else:
86 else:
87 def SMTPS(sslkwargs, keyfile=None, certfile=None, **kwargs):
87 def SMTPS(sslkwargs, keyfile=None, certfile=None, **kwargs):
88 raise util.Abort(_('SMTPS requires Python 2.6 or later'))
88 raise util.Abort(_('SMTPS requires Python 2.6 or later'))
89
89
90 def _smtp(ui):
90 def _smtp(ui):
91 '''build an smtp connection and return a function to send mail'''
91 '''build an smtp connection and return a function to send mail'''
92 local_hostname = ui.config('smtp', 'local_hostname')
92 local_hostname = ui.config('smtp', 'local_hostname')
93 tls = ui.config('smtp', 'tls', 'none')
93 tls = ui.config('smtp', 'tls', 'none')
94 # backward compatible: when tls = true, we use starttls.
94 # backward compatible: when tls = true, we use starttls.
95 starttls = tls == 'starttls' or util.parsebool(tls)
95 starttls = tls == 'starttls' or util.parsebool(tls)
96 smtps = tls == 'smtps'
96 smtps = tls == 'smtps'
97 if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
97 if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
98 raise util.Abort(_("can't use TLS: Python SSL support not installed"))
98 raise util.Abort(_("can't use TLS: Python SSL support not installed"))
99 mailhost = ui.config('smtp', 'host')
99 mailhost = ui.config('smtp', 'host')
100 if not mailhost:
100 if not mailhost:
101 raise util.Abort(_('smtp.host not configured - cannot send mail'))
101 raise util.Abort(_('smtp.host not configured - cannot send mail'))
102 verifycert = ui.config('smtp', 'verifycert', 'strict')
102 verifycert = ui.config('smtp', 'verifycert', 'strict')
103 if verifycert not in ['strict', 'loose']:
103 if verifycert not in ['strict', 'loose']:
104 if util.parsebool(verifycert) is not False:
104 if util.parsebool(verifycert) is not False:
105 raise util.Abort(_('invalid smtp.verifycert configuration: %s')
105 raise util.Abort(_('invalid smtp.verifycert configuration: %s')
106 % (verifycert))
106 % (verifycert))
107 verifycert = False
107 if (starttls or smtps) and verifycert:
108 if (starttls or smtps) and verifycert:
108 sslkwargs = sslutil.sslkwargs(ui, mailhost)
109 sslkwargs = sslutil.sslkwargs(ui, mailhost)
109 else:
110 else:
110 sslkwargs = {}
111 sslkwargs = {}
111 if smtps:
112 if smtps:
112 ui.note(_('(using smtps)\n'))
113 ui.note(_('(using smtps)\n'))
113 s = SMTPS(sslkwargs, local_hostname=local_hostname)
114 s = SMTPS(sslkwargs, local_hostname=local_hostname)
114 elif starttls:
115 elif starttls:
115 s = STARTTLS(sslkwargs, local_hostname=local_hostname)
116 s = STARTTLS(sslkwargs, local_hostname=local_hostname)
116 else:
117 else:
117 s = smtplib.SMTP(local_hostname=local_hostname)
118 s = smtplib.SMTP(local_hostname=local_hostname)
118 if smtps:
119 if smtps:
119 defaultport = 465
120 defaultport = 465
120 else:
121 else:
121 defaultport = 25
122 defaultport = 25
122 mailport = util.getport(ui.config('smtp', 'port', defaultport))
123 mailport = util.getport(ui.config('smtp', 'port', defaultport))
123 ui.note(_('sending mail: smtp host %s, port %s\n') %
124 ui.note(_('sending mail: smtp host %s, port %s\n') %
124 (mailhost, mailport))
125 (mailhost, mailport))
125 s.connect(host=mailhost, port=mailport)
126 s.connect(host=mailhost, port=mailport)
126 if starttls:
127 if starttls:
127 ui.note(_('(using starttls)\n'))
128 ui.note(_('(using starttls)\n'))
128 s.ehlo()
129 s.ehlo()
129 s.starttls()
130 s.starttls()
130 s.ehlo()
131 s.ehlo()
131 if (starttls or smtps) and verifycert:
132 if (starttls or smtps) and verifycert:
132 ui.note(_('(verifying remote certificate)\n'))
133 ui.note(_('(verifying remote certificate)\n'))
133 sslutil.validator(ui, mailhost)(s.sock, verifycert == 'strict')
134 sslutil.validator(ui, mailhost)(s.sock, verifycert == 'strict')
134 username = ui.config('smtp', 'username')
135 username = ui.config('smtp', 'username')
135 password = ui.config('smtp', 'password')
136 password = ui.config('smtp', 'password')
136 if username and not password:
137 if username and not password:
137 password = ui.getpass()
138 password = ui.getpass()
138 if username and password:
139 if username and password:
139 ui.note(_('(authenticating to mail server as %s)\n') %
140 ui.note(_('(authenticating to mail server as %s)\n') %
140 (username))
141 (username))
141 try:
142 try:
142 s.login(username, password)
143 s.login(username, password)
143 except smtplib.SMTPException, inst:
144 except smtplib.SMTPException, inst:
144 raise util.Abort(inst)
145 raise util.Abort(inst)
145
146
146 def send(sender, recipients, msg):
147 def send(sender, recipients, msg):
147 try:
148 try:
148 return s.sendmail(sender, recipients, msg)
149 return s.sendmail(sender, recipients, msg)
149 except smtplib.SMTPRecipientsRefused, inst:
150 except smtplib.SMTPRecipientsRefused, inst:
150 recipients = [r[1] for r in inst.recipients.values()]
151 recipients = [r[1] for r in inst.recipients.values()]
151 raise util.Abort('\n' + '\n'.join(recipients))
152 raise util.Abort('\n' + '\n'.join(recipients))
152 except smtplib.SMTPException, inst:
153 except smtplib.SMTPException, inst:
153 raise util.Abort(inst)
154 raise util.Abort(inst)
154
155
155 return send
156 return send
156
157
157 def _sendmail(ui, sender, recipients, msg):
158 def _sendmail(ui, sender, recipients, msg):
158 '''send mail using sendmail.'''
159 '''send mail using sendmail.'''
159 program = ui.config('email', 'method')
160 program = ui.config('email', 'method')
160 cmdline = '%s -f %s %s' % (program, util.email(sender),
161 cmdline = '%s -f %s %s' % (program, util.email(sender),
161 ' '.join(map(util.email, recipients)))
162 ' '.join(map(util.email, recipients)))
162 ui.note(_('sending mail: %s\n') % cmdline)
163 ui.note(_('sending mail: %s\n') % cmdline)
163 fp = util.popen(cmdline, 'w')
164 fp = util.popen(cmdline, 'w')
164 fp.write(msg)
165 fp.write(msg)
165 ret = fp.close()
166 ret = fp.close()
166 if ret:
167 if ret:
167 raise util.Abort('%s %s' % (
168 raise util.Abort('%s %s' % (
168 os.path.basename(program.split(None, 1)[0]),
169 os.path.basename(program.split(None, 1)[0]),
169 util.explainexit(ret)[0]))
170 util.explainexit(ret)[0]))
170
171
171 def _mbox(mbox, sender, recipients, msg):
172 def _mbox(mbox, sender, recipients, msg):
172 '''write mails to mbox'''
173 '''write mails to mbox'''
173 fp = open(mbox, 'ab+')
174 fp = open(mbox, 'ab+')
174 # Should be time.asctime(), but Windows prints 2-characters day
175 # Should be time.asctime(), but Windows prints 2-characters day
175 # of month instead of one. Make them print the same thing.
176 # of month instead of one. Make them print the same thing.
176 date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
177 date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
177 fp.write('From %s %s\n' % (sender, date))
178 fp.write('From %s %s\n' % (sender, date))
178 fp.write(msg)
179 fp.write(msg)
179 fp.write('\n\n')
180 fp.write('\n\n')
180 fp.close()
181 fp.close()
181
182
182 def connect(ui, mbox=None):
183 def connect(ui, mbox=None):
183 '''make a mail connection. return a function to send mail.
184 '''make a mail connection. return a function to send mail.
184 call as sendmail(sender, list-of-recipients, msg).'''
185 call as sendmail(sender, list-of-recipients, msg).'''
185 if mbox:
186 if mbox:
186 open(mbox, 'wb').close()
187 open(mbox, 'wb').close()
187 return lambda s, r, m: _mbox(mbox, s, r, m)
188 return lambda s, r, m: _mbox(mbox, s, r, m)
188 if ui.config('email', 'method', 'smtp') == 'smtp':
189 if ui.config('email', 'method', 'smtp') == 'smtp':
189 return _smtp(ui)
190 return _smtp(ui)
190 return lambda s, r, m: _sendmail(ui, s, r, m)
191 return lambda s, r, m: _sendmail(ui, s, r, m)
191
192
192 def sendmail(ui, sender, recipients, msg, mbox=None):
193 def sendmail(ui, sender, recipients, msg, mbox=None):
193 send = connect(ui, mbox=mbox)
194 send = connect(ui, mbox=mbox)
194 return send(sender, recipients, msg)
195 return send(sender, recipients, msg)
195
196
196 def validateconfig(ui):
197 def validateconfig(ui):
197 '''determine if we have enough config data to try sending email.'''
198 '''determine if we have enough config data to try sending email.'''
198 method = ui.config('email', 'method', 'smtp')
199 method = ui.config('email', 'method', 'smtp')
199 if method == 'smtp':
200 if method == 'smtp':
200 if not ui.config('smtp', 'host'):
201 if not ui.config('smtp', 'host'):
201 raise util.Abort(_('smtp specified as email transport, '
202 raise util.Abort(_('smtp specified as email transport, '
202 'but no smtp host configured'))
203 'but no smtp host configured'))
203 else:
204 else:
204 if not util.findexe(method):
205 if not util.findexe(method):
205 raise util.Abort(_('%r specified as email transport, '
206 raise util.Abort(_('%r specified as email transport, '
206 'but not in PATH') % method)
207 'but not in PATH') % method)
207
208
208 def mimetextpatch(s, subtype='plain', display=False):
209 def mimetextpatch(s, subtype='plain', display=False):
209 '''Return MIME message suitable for a patch.
210 '''Return MIME message suitable for a patch.
210 Charset will be detected as utf-8 or (possibly fake) us-ascii.
211 Charset will be detected as utf-8 or (possibly fake) us-ascii.
211 Transfer encodings will be used if necessary.'''
212 Transfer encodings will be used if necessary.'''
212
213
213 cs = 'us-ascii'
214 cs = 'us-ascii'
214 if not display:
215 if not display:
215 try:
216 try:
216 s.decode('us-ascii')
217 s.decode('us-ascii')
217 except UnicodeDecodeError:
218 except UnicodeDecodeError:
218 try:
219 try:
219 s.decode('utf-8')
220 s.decode('utf-8')
220 cs = 'utf-8'
221 cs = 'utf-8'
221 except UnicodeDecodeError:
222 except UnicodeDecodeError:
222 # We'll go with us-ascii as a fallback.
223 # We'll go with us-ascii as a fallback.
223 pass
224 pass
224
225
225 return mimetextqp(s, subtype, cs)
226 return mimetextqp(s, subtype, cs)
226
227
227 def mimetextqp(body, subtype, charset):
228 def mimetextqp(body, subtype, charset):
228 '''Return MIME message.
229 '''Return MIME message.
229 Quoted-printable transfer encoding will be used if necessary.
230 Quoted-printable transfer encoding will be used if necessary.
230 '''
231 '''
231 enc = None
232 enc = None
232 for line in body.splitlines():
233 for line in body.splitlines():
233 if len(line) > 950:
234 if len(line) > 950:
234 body = quopri.encodestring(body)
235 body = quopri.encodestring(body)
235 enc = "quoted-printable"
236 enc = "quoted-printable"
236 break
237 break
237
238
238 msg = email.MIMEText.MIMEText(body, subtype, charset)
239 msg = email.MIMEText.MIMEText(body, subtype, charset)
239 if enc:
240 if enc:
240 del msg['Content-Transfer-Encoding']
241 del msg['Content-Transfer-Encoding']
241 msg['Content-Transfer-Encoding'] = enc
242 msg['Content-Transfer-Encoding'] = enc
242 return msg
243 return msg
243
244
244 def _charsets(ui):
245 def _charsets(ui):
245 '''Obtains charsets to send mail parts not containing patches.'''
246 '''Obtains charsets to send mail parts not containing patches.'''
246 charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')]
247 charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')]
247 fallbacks = [encoding.fallbackencoding.lower(),
248 fallbacks = [encoding.fallbackencoding.lower(),
248 encoding.encoding.lower(), 'utf-8']
249 encoding.encoding.lower(), 'utf-8']
249 for cs in fallbacks: # find unique charsets while keeping order
250 for cs in fallbacks: # find unique charsets while keeping order
250 if cs not in charsets:
251 if cs not in charsets:
251 charsets.append(cs)
252 charsets.append(cs)
252 return [cs for cs in charsets if not cs.endswith('ascii')]
253 return [cs for cs in charsets if not cs.endswith('ascii')]
253
254
254 def _encode(ui, s, charsets):
255 def _encode(ui, s, charsets):
255 '''Returns (converted) string, charset tuple.
256 '''Returns (converted) string, charset tuple.
256 Finds out best charset by cycling through sendcharsets in descending
257 Finds out best charset by cycling through sendcharsets in descending
257 order. Tries both encoding and fallbackencoding for input. Only as
258 order. Tries both encoding and fallbackencoding for input. Only as
258 last resort send as is in fake ascii.
259 last resort send as is in fake ascii.
259 Caveat: Do not use for mail parts containing patches!'''
260 Caveat: Do not use for mail parts containing patches!'''
260 try:
261 try:
261 s.decode('ascii')
262 s.decode('ascii')
262 except UnicodeDecodeError:
263 except UnicodeDecodeError:
263 sendcharsets = charsets or _charsets(ui)
264 sendcharsets = charsets or _charsets(ui)
264 for ics in (encoding.encoding, encoding.fallbackencoding):
265 for ics in (encoding.encoding, encoding.fallbackencoding):
265 try:
266 try:
266 u = s.decode(ics)
267 u = s.decode(ics)
267 except UnicodeDecodeError:
268 except UnicodeDecodeError:
268 continue
269 continue
269 for ocs in sendcharsets:
270 for ocs in sendcharsets:
270 try:
271 try:
271 return u.encode(ocs), ocs
272 return u.encode(ocs), ocs
272 except UnicodeEncodeError:
273 except UnicodeEncodeError:
273 pass
274 pass
274 except LookupError:
275 except LookupError:
275 ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
276 ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
276 # if ascii, or all conversion attempts fail, send (broken) ascii
277 # if ascii, or all conversion attempts fail, send (broken) ascii
277 return s, 'us-ascii'
278 return s, 'us-ascii'
278
279
279 def headencode(ui, s, charsets=None, display=False):
280 def headencode(ui, s, charsets=None, display=False):
280 '''Returns RFC-2047 compliant header from given string.'''
281 '''Returns RFC-2047 compliant header from given string.'''
281 if not display:
282 if not display:
282 # split into words?
283 # split into words?
283 s, cs = _encode(ui, s, charsets)
284 s, cs = _encode(ui, s, charsets)
284 return str(email.Header.Header(s, cs))
285 return str(email.Header.Header(s, cs))
285 return s
286 return s
286
287
287 def _addressencode(ui, name, addr, charsets=None):
288 def _addressencode(ui, name, addr, charsets=None):
288 name = headencode(ui, name, charsets)
289 name = headencode(ui, name, charsets)
289 try:
290 try:
290 acc, dom = addr.split('@')
291 acc, dom = addr.split('@')
291 acc = acc.encode('ascii')
292 acc = acc.encode('ascii')
292 dom = dom.decode(encoding.encoding).encode('idna')
293 dom = dom.decode(encoding.encoding).encode('idna')
293 addr = '%s@%s' % (acc, dom)
294 addr = '%s@%s' % (acc, dom)
294 except UnicodeDecodeError:
295 except UnicodeDecodeError:
295 raise util.Abort(_('invalid email address: %s') % addr)
296 raise util.Abort(_('invalid email address: %s') % addr)
296 except ValueError:
297 except ValueError:
297 try:
298 try:
298 # too strict?
299 # too strict?
299 addr = addr.encode('ascii')
300 addr = addr.encode('ascii')
300 except UnicodeDecodeError:
301 except UnicodeDecodeError:
301 raise util.Abort(_('invalid local address: %s') % addr)
302 raise util.Abort(_('invalid local address: %s') % addr)
302 return email.Utils.formataddr((name, addr))
303 return email.Utils.formataddr((name, addr))
303
304
304 def addressencode(ui, address, charsets=None, display=False):
305 def addressencode(ui, address, charsets=None, display=False):
305 '''Turns address into RFC-2047 compliant header.'''
306 '''Turns address into RFC-2047 compliant header.'''
306 if display or not address:
307 if display or not address:
307 return address or ''
308 return address or ''
308 name, addr = email.Utils.parseaddr(address)
309 name, addr = email.Utils.parseaddr(address)
309 return _addressencode(ui, name, addr, charsets)
310 return _addressencode(ui, name, addr, charsets)
310
311
311 def addrlistencode(ui, addrs, charsets=None, display=False):
312 def addrlistencode(ui, addrs, charsets=None, display=False):
312 '''Turns a list of addresses into a list of RFC-2047 compliant headers.
313 '''Turns a list of addresses into a list of RFC-2047 compliant headers.
313 A single element of input list may contain multiple addresses, but output
314 A single element of input list may contain multiple addresses, but output
314 always has one address per item'''
315 always has one address per item'''
315 if display:
316 if display:
316 return [a.strip() for a in addrs if a.strip()]
317 return [a.strip() for a in addrs if a.strip()]
317
318
318 result = []
319 result = []
319 for name, addr in email.Utils.getaddresses(addrs):
320 for name, addr in email.Utils.getaddresses(addrs):
320 if name or addr:
321 if name or addr:
321 result.append(_addressencode(ui, name, addr, charsets))
322 result.append(_addressencode(ui, name, addr, charsets))
322 return result
323 return result
323
324
324 def mimeencode(ui, s, charsets=None, display=False):
325 def mimeencode(ui, s, charsets=None, display=False):
325 '''creates mime text object, encodes it if needed, and sets
326 '''creates mime text object, encodes it if needed, and sets
326 charset and transfer-encoding accordingly.'''
327 charset and transfer-encoding accordingly.'''
327 cs = 'us-ascii'
328 cs = 'us-ascii'
328 if not display:
329 if not display:
329 s, cs = _encode(ui, s, charsets)
330 s, cs = _encode(ui, s, charsets)
330 return mimetextqp(s, 'plain', cs)
331 return mimetextqp(s, 'plain', cs)
@@ -1,334 +1,368 b''
1 #require serve
1 #require serve
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo foo>foo
5 $ echo foo>foo
6 $ hg commit -Am 1 -d '1 0'
6 $ hg commit -Am 1 -d '1 0'
7 adding foo
7 adding foo
8 $ echo bar>bar
8 $ echo bar>bar
9 $ hg commit -Am 2 -d '2 0'
9 $ hg commit -Am 2 -d '2 0'
10 adding bar
10 adding bar
11 $ mkdir baz
11 $ mkdir baz
12 $ echo bletch>baz/bletch
12 $ echo bletch>baz/bletch
13 $ hg commit -Am 3 -d '1000000000 0'
13 $ hg commit -Am 3 -d '1000000000 0'
14 adding baz/bletch
14 adding baz/bletch
15 $ hg init subrepo
16 $ touch subrepo/sub
17 $ hg -q -R subrepo ci -Am "init subrepo"
18 $ echo "subrepo = subrepo" > .hgsub
19 $ hg add .hgsub
20 $ hg ci -m "add subrepo"
15 $ echo "[web]" >> .hg/hgrc
21 $ echo "[web]" >> .hg/hgrc
16 $ echo "name = test-archive" >> .hg/hgrc
22 $ echo "name = test-archive" >> .hg/hgrc
23 $ echo "archivesubrepos = True" >> .hg/hgrc
17 $ cp .hg/hgrc .hg/hgrc-base
24 $ cp .hg/hgrc .hg/hgrc-base
18 > test_archtype() {
25 > test_archtype() {
19 > echo "allow_archive = $1" >> .hg/hgrc
26 > echo "allow_archive = $1" >> .hg/hgrc
20 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
27 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
21 > cat hg.pid >> $DAEMON_PIDS
28 > cat hg.pid >> $DAEMON_PIDS
22 > echo % $1 allowed should give 200
29 > echo % $1 allowed should give 200
23 > "$TESTDIR/get-with-headers.py" localhost:$HGPORT "archive/tip.$2" | head -n 1
30 > "$TESTDIR/get-with-headers.py" localhost:$HGPORT "archive/tip.$2" | head -n 1
24 > echo % $3 and $4 disallowed should both give 403
31 > echo % $3 and $4 disallowed should both give 403
25 > "$TESTDIR/get-with-headers.py" localhost:$HGPORT "archive/tip.$3" | head -n 1
32 > "$TESTDIR/get-with-headers.py" localhost:$HGPORT "archive/tip.$3" | head -n 1
26 > "$TESTDIR/get-with-headers.py" localhost:$HGPORT "archive/tip.$4" | head -n 1
33 > "$TESTDIR/get-with-headers.py" localhost:$HGPORT "archive/tip.$4" | head -n 1
27 > "$TESTDIR/killdaemons.py" $DAEMON_PIDS
34 > "$TESTDIR/killdaemons.py" $DAEMON_PIDS
28 > cat errors.log
35 > cat errors.log
29 > cp .hg/hgrc-base .hg/hgrc
36 > cp .hg/hgrc-base .hg/hgrc
30 > }
37 > }
31
38
32 check http return codes
39 check http return codes
33
40
34 $ test_archtype gz tar.gz tar.bz2 zip
41 $ test_archtype gz tar.gz tar.bz2 zip
35 % gz allowed should give 200
42 % gz allowed should give 200
36 200 Script output follows
43 200 Script output follows
37 % tar.bz2 and zip disallowed should both give 403
44 % tar.bz2 and zip disallowed should both give 403
38 403 Archive type not allowed: bz2
45 403 Archive type not allowed: bz2
39 403 Archive type not allowed: zip
46 403 Archive type not allowed: zip
40 $ test_archtype bz2 tar.bz2 zip tar.gz
47 $ test_archtype bz2 tar.bz2 zip tar.gz
41 % bz2 allowed should give 200
48 % bz2 allowed should give 200
42 200 Script output follows
49 200 Script output follows
43 % zip and tar.gz disallowed should both give 403
50 % zip and tar.gz disallowed should both give 403
44 403 Archive type not allowed: zip
51 403 Archive type not allowed: zip
45 403 Archive type not allowed: gz
52 403 Archive type not allowed: gz
46 $ test_archtype zip zip tar.gz tar.bz2
53 $ test_archtype zip zip tar.gz tar.bz2
47 % zip allowed should give 200
54 % zip allowed should give 200
48 200 Script output follows
55 200 Script output follows
49 % tar.gz and tar.bz2 disallowed should both give 403
56 % tar.gz and tar.bz2 disallowed should both give 403
50 403 Archive type not allowed: gz
57 403 Archive type not allowed: gz
51 403 Archive type not allowed: bz2
58 403 Archive type not allowed: bz2
52
59
53 $ echo "allow_archive = gz bz2 zip" >> .hg/hgrc
60 $ echo "allow_archive = gz bz2 zip" >> .hg/hgrc
54 $ hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
61 $ hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
55 $ cat hg.pid >> $DAEMON_PIDS
62 $ cat hg.pid >> $DAEMON_PIDS
56
63
57 invalid arch type should give 404
64 invalid arch type should give 404
58
65
59 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT "archive/tip.invalid" | head -n 1
66 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT "archive/tip.invalid" | head -n 1
60 404 Unsupported archive type: None
67 404 Unsupported archive type: None
61
68
62 $ TIP=`hg id -v | cut -f1 -d' '`
69 $ TIP=`hg id -v | cut -f1 -d' '`
63 $ QTIP=`hg id -q`
70 $ QTIP=`hg id -q`
64 $ cat > getarchive.py <<EOF
71 $ cat > getarchive.py <<EOF
65 > import os, sys, urllib2
72 > import os, sys, urllib2
66 > try:
73 > try:
67 > # Set stdout to binary mode for win32 platforms
74 > # Set stdout to binary mode for win32 platforms
68 > import msvcrt
75 > import msvcrt
69 > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
76 > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
70 > except ImportError:
77 > except ImportError:
71 > pass
78 > pass
72 > if len(sys.argv) <= 3:
79 > if len(sys.argv) <= 3:
73 > node, archive = sys.argv[1:]
80 > node, archive = sys.argv[1:]
74 > requeststr = 'cmd=archive;node=%s;type=%s' % (node, archive)
81 > requeststr = 'cmd=archive;node=%s;type=%s' % (node, archive)
75 > else:
82 > else:
76 > node, archive, file = sys.argv[1:]
83 > node, archive, file = sys.argv[1:]
77 > requeststr = 'cmd=archive;node=%s;type=%s;file=%s' % (node, archive, file)
84 > requeststr = 'cmd=archive;node=%s;type=%s;file=%s' % (node, archive, file)
78 > try:
85 > try:
79 > f = urllib2.urlopen('http://127.0.0.1:%s/?%s'
86 > f = urllib2.urlopen('http://127.0.0.1:%s/?%s'
80 > % (os.environ['HGPORT'], requeststr))
87 > % (os.environ['HGPORT'], requeststr))
81 > sys.stdout.write(f.read())
88 > sys.stdout.write(f.read())
82 > except urllib2.HTTPError, e:
89 > except urllib2.HTTPError, e:
83 > sys.stderr.write(str(e) + '\n')
90 > sys.stderr.write(str(e) + '\n')
84 > EOF
91 > EOF
85 $ python getarchive.py "$TIP" gz | gunzip | tar tf - 2>/dev/null
92 $ python getarchive.py "$TIP" gz | gunzip | tar tf - 2>/dev/null
86 test-archive-2c0277f05ed4/.hg_archival.txt
93 test-archive-1701ef1f1510/.hg_archival.txt
87 test-archive-2c0277f05ed4/bar
94 test-archive-1701ef1f1510/.hgsub
88 test-archive-2c0277f05ed4/baz/bletch
95 test-archive-1701ef1f1510/.hgsubstate
89 test-archive-2c0277f05ed4/foo
96 test-archive-1701ef1f1510/bar
97 test-archive-1701ef1f1510/baz/bletch
98 test-archive-1701ef1f1510/foo
99 test-archive-1701ef1f1510/subrepo/sub
90 $ python getarchive.py "$TIP" bz2 | bunzip2 | tar tf - 2>/dev/null
100 $ python getarchive.py "$TIP" bz2 | bunzip2 | tar tf - 2>/dev/null
91 test-archive-2c0277f05ed4/.hg_archival.txt
101 test-archive-1701ef1f1510/.hg_archival.txt
92 test-archive-2c0277f05ed4/bar
102 test-archive-1701ef1f1510/.hgsub
93 test-archive-2c0277f05ed4/baz/bletch
103 test-archive-1701ef1f1510/.hgsubstate
94 test-archive-2c0277f05ed4/foo
104 test-archive-1701ef1f1510/bar
105 test-archive-1701ef1f1510/baz/bletch
106 test-archive-1701ef1f1510/foo
107 test-archive-1701ef1f1510/subrepo/sub
95 $ python getarchive.py "$TIP" zip > archive.zip
108 $ python getarchive.py "$TIP" zip > archive.zip
96 $ unzip -t archive.zip
109 $ unzip -t archive.zip
97 Archive: archive.zip
110 Archive: archive.zip
98 testing: test-archive-2c0277f05ed4/.hg_archival.txt OK
111 testing: test-archive-1701ef1f1510/.hg_archival.txt OK
99 testing: test-archive-2c0277f05ed4/bar OK
112 testing: test-archive-1701ef1f1510/.hgsub OK
100 testing: test-archive-2c0277f05ed4/baz/bletch OK
113 testing: test-archive-1701ef1f1510/.hgsubstate OK
101 testing: test-archive-2c0277f05ed4/foo OK
114 testing: test-archive-1701ef1f1510/bar OK
115 testing: test-archive-1701ef1f1510/baz/bletch OK
116 testing: test-archive-1701ef1f1510/foo OK
117 testing: test-archive-1701ef1f1510/subrepo/sub OK
102 No errors detected in compressed data of archive.zip.
118 No errors detected in compressed data of archive.zip.
103
119
104 test that we can download single directories and files
120 test that we can download single directories and files
105
121
106 $ python getarchive.py "$TIP" gz baz | gunzip | tar tf - 2>/dev/null
122 $ python getarchive.py "$TIP" gz baz | gunzip | tar tf - 2>/dev/null
107 test-archive-2c0277f05ed4/baz/bletch
123 test-archive-1701ef1f1510/baz/bletch
108 $ python getarchive.py "$TIP" gz foo | gunzip | tar tf - 2>/dev/null
124 $ python getarchive.py "$TIP" gz foo | gunzip | tar tf - 2>/dev/null
109 test-archive-2c0277f05ed4/foo
125 test-archive-1701ef1f1510/foo
110
126
111 test that we detect file patterns that match no files
127 test that we detect file patterns that match no files
112
128
113 $ python getarchive.py "$TIP" gz foobar
129 $ python getarchive.py "$TIP" gz foobar
114 HTTP Error 404: file(s) not found: foobar
130 HTTP Error 404: file(s) not found: foobar
115
131
116 test that we reject unsafe patterns
132 test that we reject unsafe patterns
117
133
118 $ python getarchive.py "$TIP" gz relre:baz
134 $ python getarchive.py "$TIP" gz relre:baz
119 HTTP Error 404: file(s) not found: relre:baz
135 HTTP Error 404: file(s) not found: relre:baz
120
136
121 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
137 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
122
138
123 $ hg archive -t tar test.tar
139 $ hg archive -t tar test.tar
124 $ tar tf test.tar
140 $ tar tf test.tar
125 test/.hg_archival.txt
141 test/.hg_archival.txt
142 test/.hgsub
143 test/.hgsubstate
126 test/bar
144 test/bar
127 test/baz/bletch
145 test/baz/bletch
128 test/foo
146 test/foo
129
147
130 $ hg archive --debug -t tbz2 -X baz test.tar.bz2
148 $ hg archive --debug -t tbz2 -X baz test.tar.bz2
131 archiving: 0/2 files (0.00%)
149 archiving: 0/4 files (0.00%)
132 archiving: bar 1/2 files (50.00%)
150 archiving: .hgsub 1/4 files (25.00%)
133 archiving: foo 2/2 files (100.00%)
151 archiving: .hgsubstate 2/4 files (50.00%)
152 archiving: bar 3/4 files (75.00%)
153 archiving: foo 4/4 files (100.00%)
134 $ bunzip2 -dc test.tar.bz2 | tar tf - 2>/dev/null
154 $ bunzip2 -dc test.tar.bz2 | tar tf - 2>/dev/null
135 test/.hg_archival.txt
155 test/.hg_archival.txt
156 test/.hgsub
157 test/.hgsubstate
136 test/bar
158 test/bar
137 test/foo
159 test/foo
138
160
139 $ hg archive -t tgz -p %b-%h test-%h.tar.gz
161 $ hg archive -t tgz -p %b-%h test-%h.tar.gz
140 $ gzip -dc test-$QTIP.tar.gz | tar tf - 2>/dev/null
162 $ gzip -dc test-$QTIP.tar.gz | tar tf - 2>/dev/null
141 test-2c0277f05ed4/.hg_archival.txt
163 test-1701ef1f1510/.hg_archival.txt
142 test-2c0277f05ed4/bar
164 test-1701ef1f1510/.hgsub
143 test-2c0277f05ed4/baz/bletch
165 test-1701ef1f1510/.hgsubstate
144 test-2c0277f05ed4/foo
166 test-1701ef1f1510/bar
167 test-1701ef1f1510/baz/bletch
168 test-1701ef1f1510/foo
145
169
146 $ hg archive autodetected_test.tar
170 $ hg archive autodetected_test.tar
147 $ tar tf autodetected_test.tar
171 $ tar tf autodetected_test.tar
148 autodetected_test/.hg_archival.txt
172 autodetected_test/.hg_archival.txt
173 autodetected_test/.hgsub
174 autodetected_test/.hgsubstate
149 autodetected_test/bar
175 autodetected_test/bar
150 autodetected_test/baz/bletch
176 autodetected_test/baz/bletch
151 autodetected_test/foo
177 autodetected_test/foo
152
178
153 The '-t' should override autodetection
179 The '-t' should override autodetection
154
180
155 $ hg archive -t tar autodetect_override_test.zip
181 $ hg archive -t tar autodetect_override_test.zip
156 $ tar tf autodetect_override_test.zip
182 $ tar tf autodetect_override_test.zip
157 autodetect_override_test.zip/.hg_archival.txt
183 autodetect_override_test.zip/.hg_archival.txt
184 autodetect_override_test.zip/.hgsub
185 autodetect_override_test.zip/.hgsubstate
158 autodetect_override_test.zip/bar
186 autodetect_override_test.zip/bar
159 autodetect_override_test.zip/baz/bletch
187 autodetect_override_test.zip/baz/bletch
160 autodetect_override_test.zip/foo
188 autodetect_override_test.zip/foo
161
189
162 $ for ext in tar tar.gz tgz tar.bz2 tbz2 zip; do
190 $ for ext in tar tar.gz tgz tar.bz2 tbz2 zip; do
163 > hg archive auto_test.$ext
191 > hg archive auto_test.$ext
164 > if [ -d auto_test.$ext ]; then
192 > if [ -d auto_test.$ext ]; then
165 > echo "extension $ext was not autodetected."
193 > echo "extension $ext was not autodetected."
166 > fi
194 > fi
167 > done
195 > done
168
196
169 $ cat > md5comp.py <<EOF
197 $ cat > md5comp.py <<EOF
170 > try:
198 > try:
171 > from hashlib import md5
199 > from hashlib import md5
172 > except ImportError:
200 > except ImportError:
173 > from md5 import md5
201 > from md5 import md5
174 > import sys
202 > import sys
175 > f1, f2 = sys.argv[1:3]
203 > f1, f2 = sys.argv[1:3]
176 > h1 = md5(file(f1, 'rb').read()).hexdigest()
204 > h1 = md5(file(f1, 'rb').read()).hexdigest()
177 > h2 = md5(file(f2, 'rb').read()).hexdigest()
205 > h2 = md5(file(f2, 'rb').read()).hexdigest()
178 > print h1 == h2 or "md5 differ: " + repr((h1, h2))
206 > print h1 == h2 or "md5 differ: " + repr((h1, h2))
179 > EOF
207 > EOF
180
208
181 archive name is stored in the archive, so create similar archives and
209 archive name is stored in the archive, so create similar archives and
182 rename them afterwards.
210 rename them afterwards.
183
211
184 $ hg archive -t tgz tip.tar.gz
212 $ hg archive -t tgz tip.tar.gz
185 $ mv tip.tar.gz tip1.tar.gz
213 $ mv tip.tar.gz tip1.tar.gz
186 $ sleep 1
214 $ sleep 1
187 $ hg archive -t tgz tip.tar.gz
215 $ hg archive -t tgz tip.tar.gz
188 $ mv tip.tar.gz tip2.tar.gz
216 $ mv tip.tar.gz tip2.tar.gz
189 $ python md5comp.py tip1.tar.gz tip2.tar.gz
217 $ python md5comp.py tip1.tar.gz tip2.tar.gz
190 True
218 True
191
219
192 $ hg archive -t zip -p /illegal test.zip
220 $ hg archive -t zip -p /illegal test.zip
193 abort: archive prefix contains illegal components
221 abort: archive prefix contains illegal components
194 [255]
222 [255]
195 $ hg archive -t zip -p very/../bad test.zip
223 $ hg archive -t zip -p very/../bad test.zip
196
224
197 $ hg archive --config ui.archivemeta=false -t zip -r 2 test.zip
225 $ hg archive --config ui.archivemeta=false -t zip -r 2 test.zip
198 $ unzip -t test.zip
226 $ unzip -t test.zip
199 Archive: test.zip
227 Archive: test.zip
200 testing: test/bar OK
228 testing: test/bar OK
201 testing: test/baz/bletch OK
229 testing: test/baz/bletch OK
202 testing: test/foo OK
230 testing: test/foo OK
203 No errors detected in compressed data of test.zip.
231 No errors detected in compressed data of test.zip.
204
232
205 $ hg archive -t tar - | tar tf - 2>/dev/null
233 $ hg archive -t tar - | tar tf - 2>/dev/null
206 test-2c0277f05ed4/.hg_archival.txt
234 test-1701ef1f1510/.hg_archival.txt
207 test-2c0277f05ed4/bar
235 test-1701ef1f1510/.hgsub
208 test-2c0277f05ed4/baz/bletch
236 test-1701ef1f1510/.hgsubstate
209 test-2c0277f05ed4/foo
237 test-1701ef1f1510/bar
238 test-1701ef1f1510/baz/bletch
239 test-1701ef1f1510/foo
210
240
211 $ hg archive -r 0 -t tar rev-%r.tar
241 $ hg archive -r 0 -t tar rev-%r.tar
212 $ [ -f rev-0.tar ]
242 $ [ -f rev-0.tar ]
213
243
214 test .hg_archival.txt
244 test .hg_archival.txt
215
245
216 $ hg archive ../test-tags
246 $ hg archive ../test-tags
217 $ cat ../test-tags/.hg_archival.txt
247 $ cat ../test-tags/.hg_archival.txt
218 repo: daa7f7c60e0a224faa4ff77ca41b2760562af264
248 repo: daa7f7c60e0a224faa4ff77ca41b2760562af264
219 node: 2c0277f05ed49d1c8328fb9ba92fba7a5ebcb33e
249 node: 1701ef1f151069b8747038e93b5186bb43a47504
220 branch: default
250 branch: default
221 latesttag: null
251 latesttag: null
222 latesttagdistance: 3
252 latesttagdistance: 4
223 $ hg tag -r 2 mytag
253 $ hg tag -r 2 mytag
224 $ hg tag -r 2 anothertag
254 $ hg tag -r 2 anothertag
225 $ hg archive -r 2 ../test-lasttag
255 $ hg archive -r 2 ../test-lasttag
226 $ cat ../test-lasttag/.hg_archival.txt
256 $ cat ../test-lasttag/.hg_archival.txt
227 repo: daa7f7c60e0a224faa4ff77ca41b2760562af264
257 repo: daa7f7c60e0a224faa4ff77ca41b2760562af264
228 node: 2c0277f05ed49d1c8328fb9ba92fba7a5ebcb33e
258 node: 2c0277f05ed49d1c8328fb9ba92fba7a5ebcb33e
229 branch: default
259 branch: default
230 tag: anothertag
260 tag: anothertag
231 tag: mytag
261 tag: mytag
232
262
233 $ hg archive -t bogus test.bogus
263 $ hg archive -t bogus test.bogus
234 abort: unknown archive type 'bogus'
264 abort: unknown archive type 'bogus'
235 [255]
265 [255]
236
266
237 enable progress extension:
267 enable progress extension:
238
268
239 $ cp $HGRCPATH $HGRCPATH.no-progress
269 $ cp $HGRCPATH $HGRCPATH.no-progress
240 $ cat >> $HGRCPATH <<EOF
270 $ cat >> $HGRCPATH <<EOF
241 > [extensions]
271 > [extensions]
242 > progress =
272 > progress =
243 > [progress]
273 > [progress]
244 > assume-tty = 1
274 > assume-tty = 1
245 > format = topic bar number
275 > format = topic bar number
246 > delay = 0
276 > delay = 0
247 > refresh = 0
277 > refresh = 0
248 > width = 60
278 > width = 60
249 > EOF
279 > EOF
250
280
251 $ hg archive ../with-progress
281 $ hg archive ../with-progress
252 \r (no-eol) (esc)
282 \r (no-eol) (esc)
253 archiving [ ] 0/4\r (no-eol) (esc)
283 archiving [ ] 0/6\r (no-eol) (esc)
254 archiving [ ] 0/4\r (no-eol) (esc)
284 archiving [ ] 0/6\r (no-eol) (esc)
255 archiving [=========> ] 1/4\r (no-eol) (esc)
285 archiving [======> ] 1/6\r (no-eol) (esc)
256 archiving [=========> ] 1/4\r (no-eol) (esc)
286 archiving [======> ] 1/6\r (no-eol) (esc)
257 archiving [====================> ] 2/4\r (no-eol) (esc)
287 archiving [=============> ] 2/6\r (no-eol) (esc)
258 archiving [====================> ] 2/4\r (no-eol) (esc)
288 archiving [=============> ] 2/6\r (no-eol) (esc)
259 archiving [===============================> ] 3/4\r (no-eol) (esc)
289 archiving [====================> ] 3/6\r (no-eol) (esc)
260 archiving [===============================> ] 3/4\r (no-eol) (esc)
290 archiving [====================> ] 3/6\r (no-eol) (esc)
261 archiving [==========================================>] 4/4\r (no-eol) (esc)
291 archiving [===========================> ] 4/6\r (no-eol) (esc)
262 archiving [==========================================>] 4/4\r (no-eol) (esc)
292 archiving [===========================> ] 4/6\r (no-eol) (esc)
293 archiving [==================================> ] 5/6\r (no-eol) (esc)
294 archiving [==================================> ] 5/6\r (no-eol) (esc)
295 archiving [==========================================>] 6/6\r (no-eol) (esc)
296 archiving [==========================================>] 6/6\r (no-eol) (esc)
263 \r (no-eol) (esc)
297 \r (no-eol) (esc)
264
298
265 cleanup after progress extension test:
299 cleanup after progress extension test:
266
300
267 $ cp $HGRCPATH.no-progress $HGRCPATH
301 $ cp $HGRCPATH.no-progress $HGRCPATH
268
302
269 server errors
303 server errors
270
304
271 $ cat errors.log
305 $ cat errors.log
272
306
273 empty repo
307 empty repo
274
308
275 $ hg init ../empty
309 $ hg init ../empty
276 $ cd ../empty
310 $ cd ../empty
277 $ hg archive ../test-empty
311 $ hg archive ../test-empty
278 abort: no working directory: please specify a revision
312 abort: no working directory: please specify a revision
279 [255]
313 [255]
280
314
281 old file -- date clamped to 1980
315 old file -- date clamped to 1980
282
316
283 $ touch -t 197501010000 old
317 $ touch -t 197501010000 old
284 $ hg add old
318 $ hg add old
285 $ hg commit -m old
319 $ hg commit -m old
286 $ hg archive ../old.zip
320 $ hg archive ../old.zip
287 $ unzip -l ../old.zip
321 $ unzip -l ../old.zip
288 Archive: ../old.zip
322 Archive: ../old.zip
289 \s*Length.* (re)
323 \s*Length.* (re)
290 *-----* (glob)
324 *-----* (glob)
291 *147*80*00:00*old/.hg_archival.txt (glob)
325 *147*80*00:00*old/.hg_archival.txt (glob)
292 *0*80*00:00*old/old (glob)
326 *0*80*00:00*old/old (glob)
293 *-----* (glob)
327 *-----* (glob)
294 \s*147\s+2 files (re)
328 \s*147\s+2 files (re)
295
329
296 show an error when a provided pattern matches no files
330 show an error when a provided pattern matches no files
297
331
298 $ hg archive -I file_that_does_not_exist.foo ../empty.zip
332 $ hg archive -I file_that_does_not_exist.foo ../empty.zip
299 abort: no files match the archive pattern
333 abort: no files match the archive pattern
300 [255]
334 [255]
301
335
302 $ hg archive -X * ../empty.zip
336 $ hg archive -X * ../empty.zip
303 abort: no files match the archive pattern
337 abort: no files match the archive pattern
304 [255]
338 [255]
305
339
306 $ cd ..
340 $ cd ..
307
341
308 issue3600: check whether "hg archive" can create archive files which
342 issue3600: check whether "hg archive" can create archive files which
309 are extracted with expected timestamp, even though TZ is not
343 are extracted with expected timestamp, even though TZ is not
310 configured as GMT.
344 configured as GMT.
311
345
312 $ mkdir issue3600
346 $ mkdir issue3600
313 $ cd issue3600
347 $ cd issue3600
314
348
315 $ hg init repo
349 $ hg init repo
316 $ echo a > repo/a
350 $ echo a > repo/a
317 $ hg -R repo add repo/a
351 $ hg -R repo add repo/a
318 $ hg -R repo commit -m '#0' -d '456789012 21600'
352 $ hg -R repo commit -m '#0' -d '456789012 21600'
319 $ cat > show_mtime.py <<EOF
353 $ cat > show_mtime.py <<EOF
320 > import sys, os
354 > import sys, os
321 > print int(os.stat(sys.argv[1]).st_mtime)
355 > print int(os.stat(sys.argv[1]).st_mtime)
322 > EOF
356 > EOF
323
357
324 $ hg -R repo archive --prefix tar-extracted archive.tar
358 $ hg -R repo archive --prefix tar-extracted archive.tar
325 $ (TZ=UTC-3; export TZ; tar xf archive.tar)
359 $ (TZ=UTC-3; export TZ; tar xf archive.tar)
326 $ python show_mtime.py tar-extracted/a
360 $ python show_mtime.py tar-extracted/a
327 456789012
361 456789012
328
362
329 $ hg -R repo archive --prefix zip-extracted archive.zip
363 $ hg -R repo archive --prefix zip-extracted archive.zip
330 $ (TZ=UTC-3; export TZ; unzip -q archive.zip)
364 $ (TZ=UTC-3; export TZ; unzip -q archive.zip)
331 $ python show_mtime.py zip-extracted/a
365 $ python show_mtime.py zip-extracted/a
332 456789012
366 456789012
333
367
334 $ cd ..
368 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now