##// END OF EJS Templates
archive: change "matcnfn" argument to a real matcher...
Martin von Zweigbergk -
r40443:3d76a8e6 default
parent child Browse files
Show More
@@ -1,435 +1,435 b''
1 # extdiff.py - external diff program support for mercurial
1 # extdiff.py - external diff program support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to allow external programs to compare revisions
8 '''command to allow external programs to compare revisions
9
9
10 The extdiff Mercurial extension allows you to use external programs
10 The extdiff Mercurial extension allows you to use external programs
11 to compare revisions, or revision with working directory. The external
11 to compare revisions, or revision with working directory. The external
12 diff programs are called with a configurable set of options and two
12 diff programs are called with a configurable set of options and two
13 non-option arguments: paths to directories containing snapshots of
13 non-option arguments: paths to directories containing snapshots of
14 files to compare.
14 files to compare.
15
15
16 If there is more than one file being compared and the "child" revision
16 If there is more than one file being compared and the "child" revision
17 is the working directory, any modifications made in the external diff
17 is the working directory, any modifications made in the external diff
18 program will be copied back to the working directory from the temporary
18 program will be copied back to the working directory from the temporary
19 directory.
19 directory.
20
20
21 The extdiff extension also allows you to configure new diff commands, so
21 The extdiff extension also allows you to configure new diff commands, so
22 you do not need to type :hg:`extdiff -p kdiff3` always. ::
22 you do not need to type :hg:`extdiff -p kdiff3` always. ::
23
23
24 [extdiff]
24 [extdiff]
25 # add new command that runs GNU diff(1) in 'context diff' mode
25 # add new command that runs GNU diff(1) in 'context diff' mode
26 cdiff = gdiff -Nprc5
26 cdiff = gdiff -Nprc5
27 ## or the old way:
27 ## or the old way:
28 #cmd.cdiff = gdiff
28 #cmd.cdiff = gdiff
29 #opts.cdiff = -Nprc5
29 #opts.cdiff = -Nprc5
30
30
31 # add new command called meld, runs meld (no need to name twice). If
31 # add new command called meld, runs meld (no need to name twice). If
32 # the meld executable is not available, the meld tool in [merge-tools]
32 # the meld executable is not available, the meld tool in [merge-tools]
33 # will be used, if available
33 # will be used, if available
34 meld =
34 meld =
35
35
36 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
36 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
37 # (see http://www.vim.org/scripts/script.php?script_id=102) Non
37 # (see http://www.vim.org/scripts/script.php?script_id=102) Non
38 # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
38 # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
39 # your .vimrc
39 # your .vimrc
40 vimdiff = gvim -f "+next" \\
40 vimdiff = gvim -f "+next" \\
41 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
41 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
42
42
43 Tool arguments can include variables that are expanded at runtime::
43 Tool arguments can include variables that are expanded at runtime::
44
44
45 $parent1, $plabel1 - filename, descriptive label of first parent
45 $parent1, $plabel1 - filename, descriptive label of first parent
46 $child, $clabel - filename, descriptive label of child revision
46 $child, $clabel - filename, descriptive label of child revision
47 $parent2, $plabel2 - filename, descriptive label of second parent
47 $parent2, $plabel2 - filename, descriptive label of second parent
48 $root - repository root
48 $root - repository root
49 $parent is an alias for $parent1.
49 $parent is an alias for $parent1.
50
50
51 The extdiff extension will look in your [diff-tools] and [merge-tools]
51 The extdiff extension will look in your [diff-tools] and [merge-tools]
52 sections for diff tool arguments, when none are specified in [extdiff].
52 sections for diff tool arguments, when none are specified in [extdiff].
53
53
54 ::
54 ::
55
55
56 [extdiff]
56 [extdiff]
57 kdiff3 =
57 kdiff3 =
58
58
59 [diff-tools]
59 [diff-tools]
60 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
60 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
61
61
62 You can use -I/-X and list of file or directory names like normal
62 You can use -I/-X and list of file or directory names like normal
63 :hg:`diff` command. The extdiff extension makes snapshots of only
63 :hg:`diff` command. The extdiff extension makes snapshots of only
64 needed files, so running the external diff program will actually be
64 needed files, so running the external diff program will actually be
65 pretty fast (at least faster than having to compare the entire tree).
65 pretty fast (at least faster than having to compare the entire tree).
66 '''
66 '''
67
67
68 from __future__ import absolute_import
68 from __future__ import absolute_import
69
69
70 import os
70 import os
71 import re
71 import re
72 import shutil
72 import shutil
73 import stat
73 import stat
74
74
75 from mercurial.i18n import _
75 from mercurial.i18n import _
76 from mercurial.node import (
76 from mercurial.node import (
77 nullid,
77 nullid,
78 short,
78 short,
79 )
79 )
80 from mercurial import (
80 from mercurial import (
81 archival,
81 archival,
82 cmdutil,
82 cmdutil,
83 error,
83 error,
84 filemerge,
84 filemerge,
85 formatter,
85 formatter,
86 pycompat,
86 pycompat,
87 registrar,
87 registrar,
88 scmutil,
88 scmutil,
89 util,
89 util,
90 )
90 )
91 from mercurial.utils import (
91 from mercurial.utils import (
92 procutil,
92 procutil,
93 stringutil,
93 stringutil,
94 )
94 )
95
95
96 cmdtable = {}
96 cmdtable = {}
97 command = registrar.command(cmdtable)
97 command = registrar.command(cmdtable)
98
98
99 configtable = {}
99 configtable = {}
100 configitem = registrar.configitem(configtable)
100 configitem = registrar.configitem(configtable)
101
101
102 configitem('extdiff', br'opts\..*',
102 configitem('extdiff', br'opts\..*',
103 default='',
103 default='',
104 generic=True,
104 generic=True,
105 )
105 )
106
106
107 configitem('diff-tools', br'.*\.diffargs$',
107 configitem('diff-tools', br'.*\.diffargs$',
108 default=None,
108 default=None,
109 generic=True,
109 generic=True,
110 )
110 )
111
111
112 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
112 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
113 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
113 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
114 # be specifying the version(s) of Mercurial they are tested with, or
114 # be specifying the version(s) of Mercurial they are tested with, or
115 # leave the attribute unspecified.
115 # leave the attribute unspecified.
116 testedwith = 'ships-with-hg-core'
116 testedwith = 'ships-with-hg-core'
117
117
118 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
118 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
119 '''snapshot files as of some revision
119 '''snapshot files as of some revision
120 if not using snapshot, -I/-X does not work and recursive diff
120 if not using snapshot, -I/-X does not work and recursive diff
121 in tools like kdiff3 and meld displays too many files.'''
121 in tools like kdiff3 and meld displays too many files.'''
122 dirname = os.path.basename(repo.root)
122 dirname = os.path.basename(repo.root)
123 if dirname == "":
123 if dirname == "":
124 dirname = "root"
124 dirname = "root"
125 if node is not None:
125 if node is not None:
126 dirname = '%s.%s' % (dirname, short(node))
126 dirname = '%s.%s' % (dirname, short(node))
127 base = os.path.join(tmproot, dirname)
127 base = os.path.join(tmproot, dirname)
128 os.mkdir(base)
128 os.mkdir(base)
129 fnsandstat = []
129 fnsandstat = []
130
130
131 if node is not None:
131 if node is not None:
132 ui.note(_('making snapshot of %d files from rev %s\n') %
132 ui.note(_('making snapshot of %d files from rev %s\n') %
133 (len(files), short(node)))
133 (len(files), short(node)))
134 else:
134 else:
135 ui.note(_('making snapshot of %d files from working directory\n') %
135 ui.note(_('making snapshot of %d files from working directory\n') %
136 (len(files)))
136 (len(files)))
137
137
138 if files:
138 if files:
139 repo.ui.setconfig("ui", "archivemeta", False)
139 repo.ui.setconfig("ui", "archivemeta", False)
140
140
141 archival.archive(repo, base, node, 'files',
141 archival.archive(repo, base, node, 'files',
142 matchfn=scmutil.matchfiles(repo, files),
142 match=scmutil.matchfiles(repo, files),
143 subrepos=listsubrepos)
143 subrepos=listsubrepos)
144
144
145 for fn in sorted(files):
145 for fn in sorted(files):
146 wfn = util.pconvert(fn)
146 wfn = util.pconvert(fn)
147 ui.note(' %s\n' % wfn)
147 ui.note(' %s\n' % wfn)
148
148
149 if node is None:
149 if node is None:
150 dest = os.path.join(base, wfn)
150 dest = os.path.join(base, wfn)
151
151
152 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
152 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
153 return dirname, fnsandstat
153 return dirname, fnsandstat
154
154
155 def dodiff(ui, repo, cmdline, pats, opts):
155 def dodiff(ui, repo, cmdline, pats, opts):
156 '''Do the actual diff:
156 '''Do the actual diff:
157
157
158 - copy to a temp structure if diffing 2 internal revisions
158 - copy to a temp structure if diffing 2 internal revisions
159 - copy to a temp structure if diffing working revision with
159 - copy to a temp structure if diffing working revision with
160 another one and more than 1 file is changed
160 another one and more than 1 file is changed
161 - just invoke the diff for a single file in the working dir
161 - just invoke the diff for a single file in the working dir
162 '''
162 '''
163
163
164 revs = opts.get('rev')
164 revs = opts.get('rev')
165 change = opts.get('change')
165 change = opts.get('change')
166 do3way = '$parent2' in cmdline
166 do3way = '$parent2' in cmdline
167
167
168 if revs and change:
168 if revs and change:
169 msg = _('cannot specify --rev and --change at the same time')
169 msg = _('cannot specify --rev and --change at the same time')
170 raise error.Abort(msg)
170 raise error.Abort(msg)
171 elif change:
171 elif change:
172 ctx2 = scmutil.revsingle(repo, change, None)
172 ctx2 = scmutil.revsingle(repo, change, None)
173 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
173 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
174 else:
174 else:
175 ctx1a, ctx2 = scmutil.revpair(repo, revs)
175 ctx1a, ctx2 = scmutil.revpair(repo, revs)
176 if not revs:
176 if not revs:
177 ctx1b = repo[None].p2()
177 ctx1b = repo[None].p2()
178 else:
178 else:
179 ctx1b = repo[nullid]
179 ctx1b = repo[nullid]
180
180
181 node1a = ctx1a.node()
181 node1a = ctx1a.node()
182 node1b = ctx1b.node()
182 node1b = ctx1b.node()
183 node2 = ctx2.node()
183 node2 = ctx2.node()
184
184
185 # Disable 3-way merge if there is only one parent
185 # Disable 3-way merge if there is only one parent
186 if do3way:
186 if do3way:
187 if node1b == nullid:
187 if node1b == nullid:
188 do3way = False
188 do3way = False
189
189
190 subrepos=opts.get('subrepos')
190 subrepos=opts.get('subrepos')
191
191
192 matcher = scmutil.match(repo[node2], pats, opts)
192 matcher = scmutil.match(repo[node2], pats, opts)
193
193
194 if opts.get('patch'):
194 if opts.get('patch'):
195 if subrepos:
195 if subrepos:
196 raise error.Abort(_('--patch cannot be used with --subrepos'))
196 raise error.Abort(_('--patch cannot be used with --subrepos'))
197 if node2 is None:
197 if node2 is None:
198 raise error.Abort(_('--patch requires two revisions'))
198 raise error.Abort(_('--patch requires two revisions'))
199 else:
199 else:
200 mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
200 mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
201 listsubrepos=subrepos)[:3])
201 listsubrepos=subrepos)[:3])
202 if do3way:
202 if do3way:
203 mod_b, add_b, rem_b = map(set,
203 mod_b, add_b, rem_b = map(set,
204 repo.status(node1b, node2, matcher,
204 repo.status(node1b, node2, matcher,
205 listsubrepos=subrepos)[:3])
205 listsubrepos=subrepos)[:3])
206 else:
206 else:
207 mod_b, add_b, rem_b = set(), set(), set()
207 mod_b, add_b, rem_b = set(), set(), set()
208 modadd = mod_a | add_a | mod_b | add_b
208 modadd = mod_a | add_a | mod_b | add_b
209 common = modadd | rem_a | rem_b
209 common = modadd | rem_a | rem_b
210 if not common:
210 if not common:
211 return 0
211 return 0
212
212
213 tmproot = pycompat.mkdtemp(prefix='extdiff.')
213 tmproot = pycompat.mkdtemp(prefix='extdiff.')
214 try:
214 try:
215 if not opts.get('patch'):
215 if not opts.get('patch'):
216 # Always make a copy of node1a (and node1b, if applicable)
216 # Always make a copy of node1a (and node1b, if applicable)
217 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
217 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
218 dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
218 dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
219 subrepos)[0]
219 subrepos)[0]
220 rev1a = '@%d' % repo[node1a].rev()
220 rev1a = '@%d' % repo[node1a].rev()
221 if do3way:
221 if do3way:
222 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
222 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
223 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
223 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
224 subrepos)[0]
224 subrepos)[0]
225 rev1b = '@%d' % repo[node1b].rev()
225 rev1b = '@%d' % repo[node1b].rev()
226 else:
226 else:
227 dir1b = None
227 dir1b = None
228 rev1b = ''
228 rev1b = ''
229
229
230 fnsandstat = []
230 fnsandstat = []
231
231
232 # If node2 in not the wc or there is >1 change, copy it
232 # If node2 in not the wc or there is >1 change, copy it
233 dir2root = ''
233 dir2root = ''
234 rev2 = ''
234 rev2 = ''
235 if node2:
235 if node2:
236 dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
236 dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
237 rev2 = '@%d' % repo[node2].rev()
237 rev2 = '@%d' % repo[node2].rev()
238 elif len(common) > 1:
238 elif len(common) > 1:
239 #we only actually need to get the files to copy back to
239 #we only actually need to get the files to copy back to
240 #the working dir in this case (because the other cases
240 #the working dir in this case (because the other cases
241 #are: diffing 2 revisions or single file -- in which case
241 #are: diffing 2 revisions or single file -- in which case
242 #the file is already directly passed to the diff tool).
242 #the file is already directly passed to the diff tool).
243 dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot,
243 dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot,
244 subrepos)
244 subrepos)
245 else:
245 else:
246 # This lets the diff tool open the changed file directly
246 # This lets the diff tool open the changed file directly
247 dir2 = ''
247 dir2 = ''
248 dir2root = repo.root
248 dir2root = repo.root
249
249
250 label1a = rev1a
250 label1a = rev1a
251 label1b = rev1b
251 label1b = rev1b
252 label2 = rev2
252 label2 = rev2
253
253
254 # If only one change, diff the files instead of the directories
254 # If only one change, diff the files instead of the directories
255 # Handle bogus modifies correctly by checking if the files exist
255 # Handle bogus modifies correctly by checking if the files exist
256 if len(common) == 1:
256 if len(common) == 1:
257 common_file = util.localpath(common.pop())
257 common_file = util.localpath(common.pop())
258 dir1a = os.path.join(tmproot, dir1a, common_file)
258 dir1a = os.path.join(tmproot, dir1a, common_file)
259 label1a = common_file + rev1a
259 label1a = common_file + rev1a
260 if not os.path.isfile(dir1a):
260 if not os.path.isfile(dir1a):
261 dir1a = os.devnull
261 dir1a = os.devnull
262 if do3way:
262 if do3way:
263 dir1b = os.path.join(tmproot, dir1b, common_file)
263 dir1b = os.path.join(tmproot, dir1b, common_file)
264 label1b = common_file + rev1b
264 label1b = common_file + rev1b
265 if not os.path.isfile(dir1b):
265 if not os.path.isfile(dir1b):
266 dir1b = os.devnull
266 dir1b = os.devnull
267 dir2 = os.path.join(dir2root, dir2, common_file)
267 dir2 = os.path.join(dir2root, dir2, common_file)
268 label2 = common_file + rev2
268 label2 = common_file + rev2
269 else:
269 else:
270 template = 'hg-%h.patch'
270 template = 'hg-%h.patch'
271 with formatter.nullformatter(ui, 'extdiff', {}) as fm:
271 with formatter.nullformatter(ui, 'extdiff', {}) as fm:
272 cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
272 cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
273 fm,
273 fm,
274 fntemplate=repo.vfs.reljoin(tmproot, template),
274 fntemplate=repo.vfs.reljoin(tmproot, template),
275 match=matcher)
275 match=matcher)
276 label1a = cmdutil.makefilename(repo[node1a], template)
276 label1a = cmdutil.makefilename(repo[node1a], template)
277 label2 = cmdutil.makefilename(repo[node2], template)
277 label2 = cmdutil.makefilename(repo[node2], template)
278 dir1a = repo.vfs.reljoin(tmproot, label1a)
278 dir1a = repo.vfs.reljoin(tmproot, label1a)
279 dir2 = repo.vfs.reljoin(tmproot, label2)
279 dir2 = repo.vfs.reljoin(tmproot, label2)
280 dir1b = None
280 dir1b = None
281 label1b = None
281 label1b = None
282 fnsandstat = []
282 fnsandstat = []
283
283
284 # Function to quote file/dir names in the argument string.
284 # Function to quote file/dir names in the argument string.
285 # When not operating in 3-way mode, an empty string is
285 # When not operating in 3-way mode, an empty string is
286 # returned for parent2
286 # returned for parent2
287 replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
287 replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
288 'plabel1': label1a, 'plabel2': label1b,
288 'plabel1': label1a, 'plabel2': label1b,
289 'clabel': label2, 'child': dir2,
289 'clabel': label2, 'child': dir2,
290 'root': repo.root}
290 'root': repo.root}
291 def quote(match):
291 def quote(match):
292 pre = match.group(2)
292 pre = match.group(2)
293 key = match.group(3)
293 key = match.group(3)
294 if not do3way and key == 'parent2':
294 if not do3way and key == 'parent2':
295 return pre
295 return pre
296 return pre + procutil.shellquote(replace[key])
296 return pre + procutil.shellquote(replace[key])
297
297
298 # Match parent2 first, so 'parent1?' will match both parent1 and parent
298 # Match parent2 first, so 'parent1?' will match both parent1 and parent
299 regex = (br'''(['"]?)([^\s'"$]*)'''
299 regex = (br'''(['"]?)([^\s'"$]*)'''
300 br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
300 br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
301 if not do3way and not re.search(regex, cmdline):
301 if not do3way and not re.search(regex, cmdline):
302 cmdline += ' $parent1 $child'
302 cmdline += ' $parent1 $child'
303 cmdline = re.sub(regex, quote, cmdline)
303 cmdline = re.sub(regex, quote, cmdline)
304
304
305 ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
305 ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
306 ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
306 ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
307
307
308 for copy_fn, working_fn, st in fnsandstat:
308 for copy_fn, working_fn, st in fnsandstat:
309 cpstat = os.lstat(copy_fn)
309 cpstat = os.lstat(copy_fn)
310 # Some tools copy the file and attributes, so mtime may not detect
310 # Some tools copy the file and attributes, so mtime may not detect
311 # all changes. A size check will detect more cases, but not all.
311 # all changes. A size check will detect more cases, but not all.
312 # The only certain way to detect every case is to diff all files,
312 # The only certain way to detect every case is to diff all files,
313 # which could be expensive.
313 # which could be expensive.
314 # copyfile() carries over the permission, so the mode check could
314 # copyfile() carries over the permission, so the mode check could
315 # be in an 'elif' branch, but for the case where the file has
315 # be in an 'elif' branch, but for the case where the file has
316 # changed without affecting mtime or size.
316 # changed without affecting mtime or size.
317 if (cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
317 if (cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
318 or cpstat.st_size != st.st_size
318 or cpstat.st_size != st.st_size
319 or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
319 or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
320 ui.debug('file changed while diffing. '
320 ui.debug('file changed while diffing. '
321 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
321 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
322 util.copyfile(copy_fn, working_fn)
322 util.copyfile(copy_fn, working_fn)
323
323
324 return 1
324 return 1
325 finally:
325 finally:
326 ui.note(_('cleaning up temp directory\n'))
326 ui.note(_('cleaning up temp directory\n'))
327 shutil.rmtree(tmproot)
327 shutil.rmtree(tmproot)
328
328
329 extdiffopts = [
329 extdiffopts = [
330 ('o', 'option', [],
330 ('o', 'option', [],
331 _('pass option to comparison program'), _('OPT')),
331 _('pass option to comparison program'), _('OPT')),
332 ('r', 'rev', [], _('revision'), _('REV')),
332 ('r', 'rev', [], _('revision'), _('REV')),
333 ('c', 'change', '', _('change made by revision'), _('REV')),
333 ('c', 'change', '', _('change made by revision'), _('REV')),
334 ('', 'patch', None, _('compare patches for two revisions'))
334 ('', 'patch', None, _('compare patches for two revisions'))
335 ] + cmdutil.walkopts + cmdutil.subrepoopts
335 ] + cmdutil.walkopts + cmdutil.subrepoopts
336
336
337 @command('extdiff',
337 @command('extdiff',
338 [('p', 'program', '', _('comparison program to run'), _('CMD')),
338 [('p', 'program', '', _('comparison program to run'), _('CMD')),
339 ] + extdiffopts,
339 ] + extdiffopts,
340 _('hg extdiff [OPT]... [FILE]...'),
340 _('hg extdiff [OPT]... [FILE]...'),
341 helpcategory=command.CATEGORY_FILE_CONTENTS,
341 helpcategory=command.CATEGORY_FILE_CONTENTS,
342 inferrepo=True)
342 inferrepo=True)
343 def extdiff(ui, repo, *pats, **opts):
343 def extdiff(ui, repo, *pats, **opts):
344 '''use external program to diff repository (or selected files)
344 '''use external program to diff repository (or selected files)
345
345
346 Show differences between revisions for the specified files, using
346 Show differences between revisions for the specified files, using
347 an external program. The default program used is diff, with
347 an external program. The default program used is diff, with
348 default options "-Npru".
348 default options "-Npru".
349
349
350 To select a different program, use the -p/--program option. The
350 To select a different program, use the -p/--program option. The
351 program will be passed the names of two directories to compare. To
351 program will be passed the names of two directories to compare. To
352 pass additional options to the program, use -o/--option. These
352 pass additional options to the program, use -o/--option. These
353 will be passed before the names of the directories to compare.
353 will be passed before the names of the directories to compare.
354
354
355 When two revision arguments are given, then changes are shown
355 When two revision arguments are given, then changes are shown
356 between those revisions. If only one revision is specified then
356 between those revisions. If only one revision is specified then
357 that revision is compared to the working directory, and, when no
357 that revision is compared to the working directory, and, when no
358 revisions are specified, the working directory files are compared
358 revisions are specified, the working directory files are compared
359 to its parent.'''
359 to its parent.'''
360 opts = pycompat.byteskwargs(opts)
360 opts = pycompat.byteskwargs(opts)
361 program = opts.get('program')
361 program = opts.get('program')
362 option = opts.get('option')
362 option = opts.get('option')
363 if not program:
363 if not program:
364 program = 'diff'
364 program = 'diff'
365 option = option or ['-Npru']
365 option = option or ['-Npru']
366 cmdline = ' '.join(map(procutil.shellquote, [program] + option))
366 cmdline = ' '.join(map(procutil.shellquote, [program] + option))
367 return dodiff(ui, repo, cmdline, pats, opts)
367 return dodiff(ui, repo, cmdline, pats, opts)
368
368
369 class savedcmd(object):
369 class savedcmd(object):
370 """use external program to diff repository (or selected files)
370 """use external program to diff repository (or selected files)
371
371
372 Show differences between revisions for the specified files, using
372 Show differences between revisions for the specified files, using
373 the following program::
373 the following program::
374
374
375 %(path)s
375 %(path)s
376
376
377 When two revision arguments are given, then changes are shown
377 When two revision arguments are given, then changes are shown
378 between those revisions. If only one revision is specified then
378 between those revisions. If only one revision is specified then
379 that revision is compared to the working directory, and, when no
379 that revision is compared to the working directory, and, when no
380 revisions are specified, the working directory files are compared
380 revisions are specified, the working directory files are compared
381 to its parent.
381 to its parent.
382 """
382 """
383
383
384 def __init__(self, path, cmdline):
384 def __init__(self, path, cmdline):
385 # We can't pass non-ASCII through docstrings (and path is
385 # We can't pass non-ASCII through docstrings (and path is
386 # in an unknown encoding anyway)
386 # in an unknown encoding anyway)
387 docpath = stringutil.escapestr(path)
387 docpath = stringutil.escapestr(path)
388 self.__doc__ %= {r'path': pycompat.sysstr(stringutil.uirepr(docpath))}
388 self.__doc__ %= {r'path': pycompat.sysstr(stringutil.uirepr(docpath))}
389 self._cmdline = cmdline
389 self._cmdline = cmdline
390
390
391 def __call__(self, ui, repo, *pats, **opts):
391 def __call__(self, ui, repo, *pats, **opts):
392 opts = pycompat.byteskwargs(opts)
392 opts = pycompat.byteskwargs(opts)
393 options = ' '.join(map(procutil.shellquote, opts['option']))
393 options = ' '.join(map(procutil.shellquote, opts['option']))
394 if options:
394 if options:
395 options = ' ' + options
395 options = ' ' + options
396 return dodiff(ui, repo, self._cmdline + options, pats, opts)
396 return dodiff(ui, repo, self._cmdline + options, pats, opts)
397
397
398 def uisetup(ui):
398 def uisetup(ui):
399 for cmd, path in ui.configitems('extdiff'):
399 for cmd, path in ui.configitems('extdiff'):
400 path = util.expandpath(path)
400 path = util.expandpath(path)
401 if cmd.startswith('cmd.'):
401 if cmd.startswith('cmd.'):
402 cmd = cmd[4:]
402 cmd = cmd[4:]
403 if not path:
403 if not path:
404 path = procutil.findexe(cmd)
404 path = procutil.findexe(cmd)
405 if path is None:
405 if path is None:
406 path = filemerge.findexternaltool(ui, cmd) or cmd
406 path = filemerge.findexternaltool(ui, cmd) or cmd
407 diffopts = ui.config('extdiff', 'opts.' + cmd)
407 diffopts = ui.config('extdiff', 'opts.' + cmd)
408 cmdline = procutil.shellquote(path)
408 cmdline = procutil.shellquote(path)
409 if diffopts:
409 if diffopts:
410 cmdline += ' ' + diffopts
410 cmdline += ' ' + diffopts
411 elif cmd.startswith('opts.'):
411 elif cmd.startswith('opts.'):
412 continue
412 continue
413 else:
413 else:
414 if path:
414 if path:
415 # case "cmd = path opts"
415 # case "cmd = path opts"
416 cmdline = path
416 cmdline = path
417 diffopts = len(pycompat.shlexsplit(cmdline)) > 1
417 diffopts = len(pycompat.shlexsplit(cmdline)) > 1
418 else:
418 else:
419 # case "cmd ="
419 # case "cmd ="
420 path = procutil.findexe(cmd)
420 path = procutil.findexe(cmd)
421 if path is None:
421 if path is None:
422 path = filemerge.findexternaltool(ui, cmd) or cmd
422 path = filemerge.findexternaltool(ui, cmd) or cmd
423 cmdline = procutil.shellquote(path)
423 cmdline = procutil.shellquote(path)
424 diffopts = False
424 diffopts = False
425 # look for diff arguments in [diff-tools] then [merge-tools]
425 # look for diff arguments in [diff-tools] then [merge-tools]
426 if not diffopts:
426 if not diffopts:
427 args = ui.config('diff-tools', cmd+'.diffargs') or \
427 args = ui.config('diff-tools', cmd+'.diffargs') or \
428 ui.config('merge-tools', cmd+'.diffargs')
428 ui.config('merge-tools', cmd+'.diffargs')
429 if args:
429 if args:
430 cmdline += ' ' + args
430 cmdline += ' ' + args
431 command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
431 command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
432 inferrepo=True)(savedcmd(path, cmdline))
432 inferrepo=True)(savedcmd(path, cmdline))
433
433
434 # tell hggettext to extract docstrings from these functions:
434 # tell hggettext to extract docstrings from these functions:
435 i18nfunctions = [savedcmd]
435 i18nfunctions = [savedcmd]
@@ -1,1484 +1,1484 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial import (
17 from mercurial import (
18 archival,
18 archival,
19 cmdutil,
19 cmdutil,
20 error,
20 error,
21 hg,
21 hg,
22 logcmdutil,
22 logcmdutil,
23 match as matchmod,
23 match as matchmod,
24 pathutil,
24 pathutil,
25 pycompat,
25 pycompat,
26 registrar,
26 registrar,
27 scmutil,
27 scmutil,
28 smartset,
28 smartset,
29 util,
29 util,
30 )
30 )
31
31
32 from . import (
32 from . import (
33 lfcommands,
33 lfcommands,
34 lfutil,
34 lfutil,
35 storefactory,
35 storefactory,
36 )
36 )
37
37
38 # -- Utility functions: commonly/repeatedly needed functionality ---------------
38 # -- Utility functions: commonly/repeatedly needed functionality ---------------
39
39
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    lfmatcher = copy.copy(match)

    def islfile(f):
        # A file is a largefile iff its standin is tracked in the manifest.
        return lfutil.standin(f) in manifest

    lfmatcher._files = [f for f in lfmatcher._files if islfile(f)]
    lfmatcher._fileset = set(lfmatcher._files)
    lfmatcher.always = lambda: False
    basematchfn = lfmatcher.matchfn
    lfmatcher.matchfn = lambda f: islfile(f) and basematchfn(f)
    return lfmatcher
51
51
def composenormalfilematcher(match, manifest, exclude=None):
    '''create a matcher that matches only the normal (non-large) files in
    the original matcher, optionally skipping an extra set of files'''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    normmatcher = copy.copy(match)

    def isnormal(f):
        # Reject standins, files whose standin is tracked (i.e. largefiles),
        # and anything the caller explicitly excluded.
        if lfutil.isstandin(f):
            return False
        if lfutil.standin(f) in manifest:
            return False
        return f not in excluded

    normmatcher._files = [f for f in normmatcher._files if isnormal(f)]
    normmatcher._fileset = set(normmatcher._files)
    normmatcher.always = lambda: False
    basematchfn = normmatcher.matchfn
    normmatcher.matchfn = lambda f: isnormal(f) and basematchfn(f)
    return normmatcher
66
66
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        if opts is None:
            opts = {}
        basematch = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        # Strip the largefiles out of whatever the original match built.
        return composenormalfilematcher(basematch, manifest)
    oldmatch = installmatchfn(overridematch)
76
76
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    # Remember the function being replaced on the replacement itself so
    # restorematchfn() can pop it back later.
    previous = scmutil.match
    setattr(f, 'oldmatch', previous)
    scmutil.match = f
    return previous
84
84
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Pass a default so this really is a no-op when scmutil.match is the
    # original function (which has no 'oldmatch' attribute), matching the
    # docstring and the sibling restorematchandpatsfn below. Without the
    # default, getattr raised AttributeError in that case.
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
92
92
def installmatchandpatsfn(f):
    '''monkey patch scmutil.matchandpats with a custom function; the
    replaced function is stashed on the replacement for later restore.'''
    previous = scmutil.matchandpats
    setattr(f, 'oldmatchandpats', previous)
    scmutil.matchandpats = f
    return previous
98
98
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    current = scmutil.matchandpats
    # Fall back to the current function itself when nothing was installed.
    scmutil.matchandpats = getattr(current, 'oldmatchandpats', current)
108
108
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Add files matched by matcher as largefiles.

    A file becomes a largefile when --large was given, when it is at least
    largefiles.minsize MB, or when it matches one of the configured
    largefiles.patterns. Returns a pair (added, bad) of filename lists.
    '''
    large = opts.get(r'large')
    # Minimum size (in MB) above which files are auto-added as largefiles;
    # 0/None disables the size check.
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get(r'lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    # badmatch silences "no such file" warnings during the walk.
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get(r'dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Write an empty-hash standin; the real hash is filled in
                # at commit time.
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                # A previously-removed largefile that comes back keeps its
                # dirstate entry alive via normallookup.
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # repo[None].add returns the files it could NOT add; report
            # only the ones the user named explicitly.
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

    added = [f for f in lfnames if f not in bad]
    return added, bad
180
180
def removelargefiles(ui, repo, isaddremove, matcher, dryrun, **opts):
    '''Remove the largefiles matched by matcher; shared by the remove and
    addremove overrides.

    Returns a non-zero int when at least one matched file could not be
    removed (still exists, is modified, or is marked for add), 0 otherwise.
    '''
    after = opts.get(r'after')
    # Restrict the matcher to largefiles; normal files are handled by the
    # wrapped core command.
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # Keep only entries whose standin is actually tracked.
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        # From here on we operate on the standin files.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()

    return result
248
248
249 # For overriding mercurial.hgweb.webcommands so that largefiles will
249 # For overriding mercurial.hgweb.webcommands so that largefiles will
250 # appear at their right place in the manifests.
250 # appear at their right place in the manifests.
def decodepath(orig, path):
    '''Map a standin path back to its largefile path for hgweb display.'''
    lfpath = lfutil.splitstandin(path)
    if lfpath:
        return lfpath
    return path
253
253
254 # -- Wrappers: modify existing commands --------------------------------
254 # -- Wrappers: modify existing commands --------------------------------
255
255
def overrideadd(orig, ui, repo, *pats, **opts):
    '''Wrapper for the add command rejecting --normal together with --large.'''
    normal = opts.get(r'normal')
    large = opts.get(r'large')
    if normal and large:
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
260
260
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    '''Add largefiles first, then hand the remaining normal files to core.'''
    # The --normal flag short circuits this override
    if opts.get(r'normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    largeadded, largebad = addlargefiles(ui, repo, False, matcher, **opts)
    # Exclude the freshly added largefiles from the normal add.
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             largeadded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(largebad)
    return bad
273
273
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos,
                  dryrun):
    '''Remove normal files via core, then handle largefiles separately.'''
    normalonly = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(ui, repo, normalonly, prefix, after, force, subrepos,
                        dryrun)
    largeresult = removelargefiles(ui, repo, False, matcher, dryrun,
                                   after=after, force=force)
    return largeresult or normalresult
281
281
def overridestatusfn(orig, repo, rev2, **opts):
    '''Run subrepo status with largefiles status reporting enabled.'''
    repo._repo.lfstatus = True
    try:
        return orig(repo, rev2, **opts)
    finally:
        # Always clear the flag, even if status raised.
        repo._repo.lfstatus = False
288
288
def overridestatus(orig, ui, repo, *pats, **opts):
    '''Run the status command with largefiles status reporting enabled.'''
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # Always clear the flag, even if status raised.
        repo.lfstatus = False
295
295
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    '''Check subrepo dirtiness with largefiles status reporting enabled.'''
    repo._repo.lfstatus = True
    try:
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
    finally:
        # Always clear the flag, even if the dirty check raised.
        repo._repo.lfstatus = False
302
302
def overridelog(orig, ui, repo, *pats, **opts):
    '''Run log so that patterns on largefiles also match their standins.

    Temporarily installs a matchandpats override (and a file matcher
    override for --patch), runs the original log, then restores both.
    '''
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
                             default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Filesets cannot be rewritten into standin form.
            if pat.startswith('set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            # Preserve an explicit 'kind:' prefix if one was given.
            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            # Relative path from cwd back up to the repo root.
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)
        else:
            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)
        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # Try the stripped largefile name first, then the raw name.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % ', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakefilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakefilematcher = logcmdutil._makenofollowfilematcher
    setattr(logcmdutil, '_makenofollowfilematcher', overridemakefilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # Restore both hooks even if log raised.
        restorematchandpatsfn()
        setattr(logcmdutil, '_makenofollowfilematcher', oldmakefilematcher)
408
408
def overrideverify(orig, ui, repo, *pats, **opts):
    '''Run verify, then optionally verify the largefiles store as well.'''
    # Pop the largefiles-specific flags before handing opts to core verify.
    large = opts.pop(r'large', False)
    verifyall = opts.pop(r'lfa', False)
    verifycontents = opts.pop(r'lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or verifyall or verifycontents:
        result = result or lfcommands.verifylfiles(ui, repo, verifyall,
                                                   verifycontents)
    return result
418
418
def overridedebugstate(orig, ui, repo, *pats, **opts):
    '''Show the largefiles dirstate instead when --large is given.'''
    if not opts.pop(r'large', False):
        orig(ui, repo, *pats, **opts)
        return

    class fakerepo(object):
        # Minimal stand-in exposing only the dirstate attribute that
        # debugstate reads, pointed at the largefiles dirstate.
        dirstate = lfutil.openlfdirstate(ui, repo)
    orig(ui, fakerepo, *pats, **opts)
427
427
428 # Before starting the manifest merge, merge.updates will call
428 # Before starting the manifest merge, merge.updates will call
429 # _checkunknownfile to check if there are any files in the merged-in
429 # _checkunknownfile to check if there are any files in the merged-in
430 # changeset that collide with unknown files in the working copy.
430 # changeset that collide with unknown files in the working copy.
431 #
431 #
432 # The largefiles are seen as unknown, so this prevents us from merging
432 # The largefiles are seen as unknown, so this prevents us from merging
433 # in a file 'foo' if we already have a largefile with the same name.
433 # in a file 'foo' if we already have a largefile with the same name.
434 #
434 #
435 # The overridden function filters the unknown files by removing any
435 # The overridden function filters the unknown files by removing any
436 # largefiles. This makes the merge proceed and we can then handle this
436 # largefiles. This makes the merge proceed and we can then handle this
437 # case further in the overridden calculateupdates function below.
437 # case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    '''Treat tracked largefiles as known so they do not block a merge.'''
    normalized = repo.dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        # The "unknown" file is really a tracked largefile; no collision.
        return False
    return origfn(repo, wctx, mctx, f, f2)
442
442
443 # The manifest merge handles conflicts on the manifest level. We want
443 # The manifest merge handles conflicts on the manifest level. We want
444 # to handle changes in largefile-ness of files at this level too.
444 # to handle changes in largefile-ness of files at this level too.
445 #
445 #
446 # The strategy is to run the original calculateupdates and then process
446 # The strategy is to run the original calculateupdates and then process
447 # the action list it outputs. There are two cases we need to deal with:
447 # the action list it outputs. There are two cases we need to deal with:
448 #
448 #
449 # 1. Normal file in p1, largefile in p2. Here the largefile is
449 # 1. Normal file in p1, largefile in p2. Here the largefile is
450 # detected via its standin file, which will enter the working copy
450 # detected via its standin file, which will enter the working copy
451 # with a "get" action. It is not "merge" since the standin is all
451 # with a "get" action. It is not "merge" since the standin is all
452 # Mercurial is concerned with at this level -- the link to the
452 # Mercurial is concerned with at this level -- the link to the
453 # existing normal file is not relevant here.
453 # existing normal file is not relevant here.
454 #
454 #
455 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
455 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
456 # since the largefile will be present in the working copy and
456 # since the largefile will be present in the working copy and
457 # different from the normal file in p2. Mercurial therefore
457 # different from the normal file in p2. Mercurial therefore
458 # triggers a merge action.
458 # triggers a merge action.
459 #
459 #
460 # In both cases, we prompt the user and emit new actions to either
460 # In both cases, we prompt the user and emit new actions to either
461 # remove the standin (if the normal file was kept) or to remove the
461 # remove the standin (if the normal file was kept) or to remove the
462 # normal file and get the standin (if the largefile was kept). The
462 # normal file and get the standin (if the largefile was kept). The
463 # default prompt answer is to use the largefile version since it was
463 # default prompt answer is to use the largefile version since it was
464 # presumably changed on purpose.
464 # presumably changed on purpose.
465 #
465 #
466 # Finally, the merge.applyupdates function will then take care of
466 # Finally, the merge.applyupdates function will then take care of
467 # writing the files into the working copy and lfcommands.updatelfiles
467 # writing the files into the working copy and lfcommands.updatelfiles
468 # will update the largefiles.
468 # will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             acceptremote, *args, **kwargs):
    """Post-process merge actions for largefile <-> normal file conflicts.

    Runs the wrapped merge.calculateupdates (``origfn``) and then rewrites
    entries in the returned ``actions`` dict so that, for each file that is
    a largefile on one side and a normal file on the other, the user is
    prompted once and both the largefile entry and its standin entry are
    given consistent actions.

    Action codes seen here: 'g' (get), 'r' (remove), 'k' (keep),
    'a' (add), 'lfmr' (largefiles-specific removal marker handled in
    mergerecordupdates), and 'dc' — presumably "deleted/changed" as emitted
    by core merge; its args tuple is converted to 'g'-style args below.
    """
    # With --force and no branch merge we are overwriting; core's answer
    # stands as-is.
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)

    if overwrite:
        return actions, diverge, renamedelete

    # Collect the largefile names that are involved on the p1 side, whether
    # the action is keyed by the largefile itself or by its standin.
    lfiles = set()
    for f in actions:
        splitstandin = lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        # Look up the planned action (if any) for both spellings of the file.
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                f1, f2, fa, move, anc = sargs
                # Reduce the 'dc' args to the (flags, backup) pair 'g' expects.
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
536
536
def mergerecordupdates(orig, repo, actions, branchmerge):
    """Handle 'lfmr' actions, then delegate recording to the core hook.

    Files queued under 'lfmr' are dropped from the main dirstate and added
    to the largefiles dirstate before ``orig`` runs, so the removal is
    registered ahead of every other recorded action.
    """
    lfmractions = actions.get('lfmr')
    if lfmractions is not None:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, _args, _msg in lfmractions:
            # must happen before 'orig' so 'remove' precedes all other
            # recorded actions
            repo.dirstate.remove(lfile)
            # keep lfile from being synclfdirstate'd back to normal
            lfdirstate.add(lfile)
        lfdirstate.write()
    return orig(repo, actions, branchmerge)
549
549
550 # Override filemerge to prompt the user about how they wish to merge
550 # Override filemerge to prompt the user about how they wish to merge
551 # largefiles. This will handle identical edits without prompting the user.
551 # largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca,
                      labels=None):
    """Merge a standin by comparing largefile hashes instead of contents.

    Non-standins, and merges where either side is absent, are handed back to
    the wrapped filemerge (``origfn``).  For standins the three hashes
    (ancestor, local/destination, other) decide the outcome:

    - other unchanged from ancestor, or identical to local: keep local,
      no prompt (identical edits are resolved silently);
    - local unchanged from ancestor: take other, no prompt;
    - otherwise: real conflict, prompt the user to keep local or take other.

    Returns ``(True, 0, False)`` — same tuple shape as the wrapped
    filemerge's (complete, return-code, deleted) result.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(premerge, repo, wctx, mynode, orig, fcd, fco, fca,
                      labels=labels)

    # Standins contain the largefile hash; compare those, not file data.
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
             (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        # Take the "other" side: overwrite the working copy standin.
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
572
572
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Compute path copies, mapping standin names back to largefile names.

    Both the source and destination of every copy record are translated
    with splitstandin when they name a standin; other names pass through.
    """
    copies = orig(ctx1, ctx2, match=match)
    return {lfutil.splitstandin(src) or src: lfutil.splitstandin(dst) or dst
            for src, dst in copies.iteritems()}
581
581
582 # Copy first changes the matchers to match standins instead of
582 # Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile so that it first
# checks whether the destination largefile already exists. It also keeps a
585 # list of copied files so that the largefiles can be copied and the
585 # list of copied files so that the largefiles can be copied and the
586 # dirstate updated.
586 # dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename normal files and largefiles in two separate passes.

    First pass: matcher restricted to normal files, run the wrapped
    copy/rename.  Second pass: patterns rewritten to standin paths, matcher
    patched to map standins back to largefiles, and util.copyfile patched
    to (a) refuse to clobber an existing largefile without --force and
    (b) record what was copied so the largefiles themselves can be
    copied/renamed and the lfdirstate updated afterwards.

    Aborts with 'no files to copy' only if *both* passes matched nothing.
    Returns the combined result code of the two wrapped invocations.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except error.Abort as e:
        # 'no files to copy' here just means pass 1 was empty; remember it
        # and keep going — pass 2 may still match largefiles.
        if pycompat.bytestr(e) != _('no files to copy'):
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # Map a working-directory-relative path to the absolute path of
        # its standin file.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        # Destination is a directory: make sure the mirror directory under
        # the standin tree exists before the copy runs.
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            # Matcher for pass 2: accept only standins of tracked
            # largefiles, while reporting/matching the largefile names.
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (lfile is not None and
                        (f in manifest) and
                        origmatchfn(lfile) or
                        None)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        # Rewrite literal patterns to their standin paths; pattern-kinded
        # ones (glob:, re:, ...) are left for the matcher to handle.
        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest, *args, **kwargs):
                # Refuse to overwrite an existing largefile without --force,
                # and record every standin copy for the post-processing loop.
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                                      _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest, *args, **kwargs)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        # Now mirror each recorded standin copy onto the largefiles
        # themselves and update the largefiles dirstate.
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                    lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if pycompat.bytestr(e) != _('no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # Only abort if neither pass found anything to copy.
    if nolfiles and nonormalfiles:
        raise error.Abort(_('no files to copy'))

    return result
722
722
723 # When the user calls revert, we have to be careful to not revert any
723 # When the user calls revert, we have to be careful to not revert any
724 # changes to other largefiles accidentally. This means we have to keep
724 # changes to other largefiles accidentally. This means we have to keep
725 # track of the largefiles that are being reverted so we only pull down
725 # track of the largefiles that are being reverted so we only pull down
726 # the necessary largefiles.
726 # the necessary largefiles.
727 #
727 #
728 # Standins are only updated (to match the hash of largefiles) before
728 # Standins are only updated (to match the hash of largefiles) before
729 # commits. Update the standins then run the original revert, changing
729 # commits. Update the standins then run the original revert, changing
730 # the matcher to hit standins instead of largefiles. Based on the
730 # the matcher to hit standins instead of largefiles. Based on the
731 # resulting standins update the largefiles.
731 # resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    """Run revert against standins, then sync largefiles to the result.

    Standins are only current at commit time, so before delegating to the
    wrapped revert this refreshes them from the working copy (and deletes
    standins of deleted largefiles), swaps in a matcher that redirects
    largefile names to their standins, and afterwards updates the actual
    largefiles from whichever standins the revert changed.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        # Bring standins in line with the current working-copy largefiles.
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if (repo.wvfs.exists(fstandin)):
                repo.wvfs.unlink(fstandin)

        # Snapshot standin state so we can diff it after the revert.
        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            # Matcher that substitutes standins for largefile names where a
            # standin exists in either context being reverted between.
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate[f] == 'r':
                    # Largefile with no standin in either target context:
                    # nothing to revert from; drop it from the file list.
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return (origmatchfn(lfile) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        # Update the largefiles whose standins the revert actually changed.
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)
799
799
800 # after pulling changesets, we need to take some extra care to get
800 # after pulling changesets, we need to take some extra care to get
801 # largefiles updated remotely
801 # largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    """Run pull, then cache largefiles for the revisions --lfrev selects.

    With --all-largefiles, the 'pulled()' revset is appended so every newly
    pulled changeset has its largefiles fetched.  Reports how many
    largefiles were cached.
    """
    numbefore = len(repo)
    source = source or 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    numafter = len(repo)
    revsets = opts.get(r'lfrev', [])
    if opts.get(r'all_largefiles'):
        revsets.append('pulled()')
    if revsets and numafter > numbefore:
        fetched = 0
        # Expose the pre-pull boundary for the pulled() revset predicate.
        repo.firstpulled = numbefore
        try:
            for rev in scmutil.revrange(repo, revsets):
                ui.note(_('pulling largefiles for revision %d\n') % rev)
                cached, missing = lfcommands.cachelfiles(ui, repo, rev)
                fetched += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % fetched)
    return result
824
824
def overridepush(orig, ui, repo, *args, **kwargs):
    """Stash any --lfrev revsets into opargs before delegating to push."""
    revs = kwargs.pop(r'lfrev', None)
    if revs:
        opargs = kwargs.setdefault(r'opargs', {})
        opargs['lfrevs'] = scmutil.revrange(repo, revs)
    return orig(ui, repo, *args, **kwargs)
832
832
def exchangepushoperation(orig, *args, **kwargs):
    """Attach the --lfrev selection to the constructed pushoperation."""
    revs = kwargs.pop(r'lfrevs', None)
    op = orig(*args, **kwargs)
    op.lfrevs = revs
    return op
839
839
# Registry for the revset predicates this extension contributes; populated
# via the @revsetpredicate decorator below (e.g. 'pulled()').
revsetpredicate = registrar.revsetpredicate()
841
841
@revsetpredicate('pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only set (by overridepull) while a pull with
    # --lfrev is evaluating its revsets; anywhere else this predicate is
    # meaningless, so abort.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_("pulled() only available in --lfrev"))
    # Everything at or after the pre-pull length boundary is "pulled".
    return smartset.baseset([r for r in subset if r >= firstpulled])
867
867
def overrideclone(orig, ui, source, dest=None, **opts):
    """Reject --all-largefiles clones whose destination is not local."""
    destpath = dest if dest is not None else hg.defaultdest(source)
    if opts.get(r'all_largefiles') and not hg.islocal(destpath):
        raise error.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            destpath)

    return orig(ui, source, dest, **opts)
878
878
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone to optionally pre-download every largefile.

    Returns the wrapped clone's (sourcerepo, destrepo) pair, or None when
    the clone failed or --all-largefiles could not fetch every largefile.
    """
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return None

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer. Therefore the largefiles can't be downloaded and the
    # hgrc can't be updated.
    if not repo:
        return result

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point. The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get('all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo, None)
        if missing != 0:
            return None

    return result
902
902
def overriderebase(orig, ui, repo, **opts):
    """Run rebase with largefiles' commit hook installed and status muted."""
    if not util.safehasattr(repo, '_largefilesenabled'):
        return orig(ui, repo, **opts)

    isresume = opts.get(r'continue')
    silentwriter = lambda *msg, **opts: None
    repo._lfcommithooks.append(lfutil.automatedcommithook(isresume))
    repo._lfstatuswriters.append(silentwriter)
    try:
        return orig(ui, repo, **opts)
    finally:
        # unwind in reverse order of the appends above
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
915
915
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run the archive command with largefile status enabled on the
    unfiltered repo, restoring the flag afterwards."""
    unfiltered = repo.unfiltered()
    unfiltered.lfstatus = True
    try:
        return orig(ui, unfiltered, dest, **opts)
    finally:
        unfiltered.lfstatus = False
923
923
def hgwebarchive(orig, web):
    """Serve an hgweb archive request with largefile status reporting on."""
    repo = web.repo
    repo.lfstatus = True
    try:
        return orig(web)
    finally:
        repo.lfstatus = False
931
931
def overridearchive(orig, repo, dest, node, kind, decode=True, match=None,
                    prefix='', mtime=None, subrepos=None):
    """Archive 'node', substituting largefile standins with real content.

    Falls through to 'orig' unless largefile status tracking is active
    on the repo (or its unfiltered view).  Largefile data is read from
    the local store or the system cache; error.Abort is raised when a
    required largefile cannot be found, or for an unknown archive kind.
    'match', if given, is a matcher limiting the archived files.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, match, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Skip files excluded by the matcher; decode filters if requested.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta"):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # 'f' is a standin: archive the largefile under its real name.
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfile)
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            sub._repo.lfstatus = True
            sub.archive(archiver, prefix, submatch)

    archiver.done()
999
999
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive a hg subrepo, expanding largefile standins to content.

    Falls through to 'orig' unless the subrepo has largefiles enabled
    and largefile status tracking active.  Nested subrepos are archived
    recursively with lfstatus enabled.
    """
    lfenabled = util.safehasattr(repo._repo, '_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # NOTE(review): this tests the enclosing loop variable 'f', not
        # 'name' — presumably equivalent for every current caller, but
        # worth confirming upstream.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # 'f' is a standin: archive the largefile under its real name.
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfile)
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
1049
1049
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort when the working directory has uncommitted largefile changes."""
    orig(repo, *args, **kwargs)
    repo.lfstatus = True
    s = repo.status()
    repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_('uncommitted changes'))
1061
1061
def postcommitstatus(orig, repo, *args, **kwargs):
    """Run the post-commit status check with largefile status enabled."""
    repo.lfstatus = True
    try:
        return orig(repo, *args, **kwargs)
    finally:
        repo.lfstatus = False
1068
1068
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly, dryrun,
                  interactive):
    """Wrap cmdutil.forget() to also forget largefiles.

    Normal files are handled by 'orig' with a matcher restricted to
    non-largefiles; largefiles matched by 'match' are dropped from the
    largefile dirstate and their standins are unlinked and forgotten.

    Returns a (bad, forgot) pair, like the wrapped function.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly, dryrun,
                       interactive)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1114
1114
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        # Collect each (filename, hash) pair only once.
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]: # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)
1137
1137
def outgoinghook(ui, repo, other, opts, missing):
    """'hg outgoing --large' hook: list largefiles that would be uploaded.

    In debug mode each file's pending hashes are shown as well.
    """
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1169
1169
def summaryremotehook(ui, repo, opts, changes):
    """'hg summary --remote --large' hook: report largefiles to upload.

    When 'changes' is None, returns an (incoming-needed, outgoing-needed)
    pair of booleans telling summary which remote checks to perform.
    """
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1198
1198
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefile status reporting enabled."""
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1205
1205
def scmutiladdremove(orig, repo, matcher, prefix, opts=None):
    """Wrap scmutil.addremove() so largefiles are added/removed as such.

    Missing largefiles are removed and new largefiles added through the
    largefiles code paths; the original addremove then handles the rest
    with a matcher that excludes largefiles.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()),
                                  subrepos=[], ignored=False, clean=False,
                                  unknown=False)

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, opts.get('dry_run'),
                         **pycompat.strkwargs(opts))
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher,
                               **pycompat.strkwargs(opts))
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts)
1242
1242
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Run purge with a status() that hides largefiles from --all."""
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        # Filter out largefiles: only names the largefile dirstate does
        # not know about ('?') remain in unknown/ignored.
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
1269
1269
def overriderollback(orig, ui, repo, **opts):
    """Wrap rollback to restore largefile standins to the rolled-back state.

    After a rollback that moved the dirstate parents, standins are
    rewritten from the new parent (or emptied for added/merged files),
    orphaned standins are unlinked, and the largefile dirstate is
    resynchronized.
    """
    with repo.wlock():
        before = repo.dirstate.parents()
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result
1306
1306
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Wrap transplant so largefile standins are committed automatically."""
    resuming = opts.get(r'continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result
1317
1317
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """'hg cat' override that can cat a largefile by its visible name.

    The matcher is patched so a largefile name also matches its standin
    (and the standin directory is visited); largefile contents are
    fetched from the store into the user cache when not present.
    Returns 0 if at least one file was written, 1 otherwise.
    """
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # Suppress "no such file" for names resolved via their standin.
        if not f in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get('decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _('largefile %s is not in cache and could not be '
                              'downloaded') % lf)
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, "rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1377
1377
1378 def mergeupdate(orig, repo, node, branchmerge, force,
1378 def mergeupdate(orig, repo, node, branchmerge, force,
1379 *args, **kwargs):
1379 *args, **kwargs):
1380 matcher = kwargs.get(r'matcher', None)
1380 matcher = kwargs.get(r'matcher', None)
1381 # note if this is a partial update
1381 # note if this is a partial update
1382 partial = matcher and not matcher.always()
1382 partial = matcher and not matcher.always()
1383 with repo.wlock():
1383 with repo.wlock():
1384 # branch | | |
1384 # branch | | |
1385 # merge | force | partial | action
1385 # merge | force | partial | action
1386 # -------+-------+---------+--------------
1386 # -------+-------+---------+--------------
1387 # x | x | x | linear-merge
1387 # x | x | x | linear-merge
1388 # o | x | x | branch-merge
1388 # o | x | x | branch-merge
1389 # x | o | x | overwrite (as clean update)
1389 # x | o | x | overwrite (as clean update)
1390 # o | o | x | force-branch-merge (*1)
1390 # o | o | x | force-branch-merge (*1)
1391 # x | x | o | (*)
1391 # x | x | o | (*)
1392 # o | x | o | (*)
1392 # o | x | o | (*)
1393 # x | o | o | overwrite (as revert)
1393 # x | o | o | overwrite (as revert)
1394 # o | o | o | (*)
1394 # o | o | o | (*)
1395 #
1395 #
1396 # (*) don't care
1396 # (*) don't care
1397 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1397 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1398
1398
1399 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1399 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1400 unsure, s = lfdirstate.status(matchmod.always(repo.root,
1400 unsure, s = lfdirstate.status(matchmod.always(repo.root,
1401 repo.getcwd()),
1401 repo.getcwd()),
1402 subrepos=[], ignored=False,
1402 subrepos=[], ignored=False,
1403 clean=True, unknown=False)
1403 clean=True, unknown=False)
1404 oldclean = set(s.clean)
1404 oldclean = set(s.clean)
1405 pctx = repo['.']
1405 pctx = repo['.']
1406 dctx = repo[node]
1406 dctx = repo[node]
1407 for lfile in unsure + s.modified:
1407 for lfile in unsure + s.modified:
1408 lfileabs = repo.wvfs.join(lfile)
1408 lfileabs = repo.wvfs.join(lfile)
1409 if not repo.wvfs.exists(lfileabs):
1409 if not repo.wvfs.exists(lfileabs):
1410 continue
1410 continue
1411 lfhash = lfutil.hashfile(lfileabs)
1411 lfhash = lfutil.hashfile(lfileabs)
1412 standin = lfutil.standin(lfile)
1412 standin = lfutil.standin(lfile)
1413 lfutil.writestandin(repo, standin, lfhash,
1413 lfutil.writestandin(repo, standin, lfhash,
1414 lfutil.getexecutable(lfileabs))
1414 lfutil.getexecutable(lfileabs))
1415 if (standin in pctx and
1415 if (standin in pctx and
1416 lfhash == lfutil.readasstandin(pctx[standin])):
1416 lfhash == lfutil.readasstandin(pctx[standin])):
1417 oldclean.add(lfile)
1417 oldclean.add(lfile)
1418 for lfile in s.added:
1418 for lfile in s.added:
1419 fstandin = lfutil.standin(lfile)
1419 fstandin = lfutil.standin(lfile)
1420 if fstandin not in dctx:
1420 if fstandin not in dctx:
1421 # in this case, content of standin file is meaningless
1421 # in this case, content of standin file is meaningless
1422 # (in dctx, lfile is unknown, or normal file)
1422 # (in dctx, lfile is unknown, or normal file)
1423 continue
1423 continue
1424 lfutil.updatestandin(repo, lfile, fstandin)
1424 lfutil.updatestandin(repo, lfile, fstandin)
1425 # mark all clean largefiles as dirty, just in case the update gets
1425 # mark all clean largefiles as dirty, just in case the update gets
1426 # interrupted before largefiles and lfdirstate are synchronized
1426 # interrupted before largefiles and lfdirstate are synchronized
1427 for lfile in oldclean:
1427 for lfile in oldclean:
1428 lfdirstate.normallookup(lfile)
1428 lfdirstate.normallookup(lfile)
1429 lfdirstate.write()
1429 lfdirstate.write()
1430
1430
1431 oldstandins = lfutil.getstandinsstate(repo)
1431 oldstandins = lfutil.getstandinsstate(repo)
1432 # Make sure the merge runs on disk, not in-memory. largefiles is not a
1432 # Make sure the merge runs on disk, not in-memory. largefiles is not a
1433 # good candidate for in-memory merge (large files, custom dirstate,
1433 # good candidate for in-memory merge (large files, custom dirstate,
1434 # matcher usage).
1434 # matcher usage).
1435 kwargs[r'wc'] = repo[None]
1435 kwargs[r'wc'] = repo[None]
1436 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1436 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1437
1437
1438 newstandins = lfutil.getstandinsstate(repo)
1438 newstandins = lfutil.getstandinsstate(repo)
1439 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1439 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1440
1440
1441 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1441 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1442 # all the ones that didn't change as clean
1442 # all the ones that didn't change as clean
1443 for lfile in oldclean.difference(filelist):
1443 for lfile in oldclean.difference(filelist):
1444 lfdirstate.normal(lfile)
1444 lfdirstate.normal(lfile)
1445 lfdirstate.write()
1445 lfdirstate.write()
1446
1446
1447 if branchmerge or force or partial:
1447 if branchmerge or force or partial:
1448 filelist.extend(s.deleted + s.removed)
1448 filelist.extend(s.deleted + s.removed)
1449
1449
1450 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1450 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1451 normallookup=partial)
1451 normallookup=partial)
1452
1452
1453 return result
1453 return result
1454
1454
1455 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1455 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1456 result = orig(repo, files, *args, **kwargs)
1456 result = orig(repo, files, *args, **kwargs)
1457
1457
1458 filelist = []
1458 filelist = []
1459 for f in files:
1459 for f in files:
1460 lf = lfutil.splitstandin(f)
1460 lf = lfutil.splitstandin(f)
1461 if lf is not None:
1461 if lf is not None:
1462 filelist.append(lf)
1462 filelist.append(lf)
1463 if filelist:
1463 if filelist:
1464 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1464 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1465 printmessage=False, normallookup=True)
1465 printmessage=False, normallookup=True)
1466
1466
1467 return result
1467 return result
1468
1468
1469 def upgraderequirements(orig, repo):
1469 def upgraderequirements(orig, repo):
1470 reqs = orig(repo)
1470 reqs = orig(repo)
1471 if 'largefiles' in repo.requirements:
1471 if 'largefiles' in repo.requirements:
1472 reqs.add('largefiles')
1472 reqs.add('largefiles')
1473 return reqs
1473 return reqs
1474
1474
1475 _lfscheme = 'largefile://'
1475 _lfscheme = 'largefile://'
1476 def openlargefile(orig, ui, url_, data=None):
1476 def openlargefile(orig, ui, url_, data=None):
1477 if url_.startswith(_lfscheme):
1477 if url_.startswith(_lfscheme):
1478 if data:
1478 if data:
1479 msg = "cannot use data on a 'largefile://' url"
1479 msg = "cannot use data on a 'largefile://' url"
1480 raise error.ProgrammingError(msg)
1480 raise error.ProgrammingError(msg)
1481 lfid = url_[len(_lfscheme):]
1481 lfid = url_[len(_lfscheme):]
1482 return storefactory.getlfile(ui, lfid)
1482 return storefactory.getlfile(ui, lfid)
1483 else:
1483 else:
1484 return orig(ui, url_, data=data)
1484 return orig(ui, url_, data=data)
@@ -1,349 +1,349 b''
1 # archival.py - revision archival for mercurial
1 # archival.py - revision archival for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import gzip
10 import gzip
11 import os
11 import os
12 import struct
12 import struct
13 import tarfile
13 import tarfile
14 import time
14 import time
15 import zipfile
15 import zipfile
16 import zlib
16 import zlib
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 nullrev,
20 nullrev,
21 )
21 )
22
22
23 from . import (
23 from . import (
24 error,
24 error,
25 formatter,
25 formatter,
26 match as matchmod,
26 match as matchmod,
27 pycompat,
27 pycompat,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 vfs as vfsmod,
30 vfs as vfsmod,
31 )
31 )
32 stringio = util.stringio
32 stringio = util.stringio
33
33
34 # from unzip source code:
34 # from unzip source code:
35 _UNX_IFREG = 0x8000
35 _UNX_IFREG = 0x8000
36 _UNX_IFLNK = 0xa000
36 _UNX_IFLNK = 0xa000
37
37
38 def tidyprefix(dest, kind, prefix):
38 def tidyprefix(dest, kind, prefix):
39 '''choose prefix to use for names in archive. make sure prefix is
39 '''choose prefix to use for names in archive. make sure prefix is
40 safe for consumers.'''
40 safe for consumers.'''
41
41
42 if prefix:
42 if prefix:
43 prefix = util.normpath(prefix)
43 prefix = util.normpath(prefix)
44 else:
44 else:
45 if not isinstance(dest, bytes):
45 if not isinstance(dest, bytes):
46 raise ValueError('dest must be string if no prefix')
46 raise ValueError('dest must be string if no prefix')
47 prefix = os.path.basename(dest)
47 prefix = os.path.basename(dest)
48 lower = prefix.lower()
48 lower = prefix.lower()
49 for sfx in exts.get(kind, []):
49 for sfx in exts.get(kind, []):
50 if lower.endswith(sfx):
50 if lower.endswith(sfx):
51 prefix = prefix[:-len(sfx)]
51 prefix = prefix[:-len(sfx)]
52 break
52 break
53 lpfx = os.path.normpath(util.localpath(prefix))
53 lpfx = os.path.normpath(util.localpath(prefix))
54 prefix = util.pconvert(lpfx)
54 prefix = util.pconvert(lpfx)
55 if not prefix.endswith('/'):
55 if not prefix.endswith('/'):
56 prefix += '/'
56 prefix += '/'
57 # Drop the leading '.' path component if present, so Windows can read the
57 # Drop the leading '.' path component if present, so Windows can read the
58 # zip files (issue4634)
58 # zip files (issue4634)
59 if prefix.startswith('./'):
59 if prefix.startswith('./'):
60 prefix = prefix[2:]
60 prefix = prefix[2:]
61 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
61 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
62 raise error.Abort(_('archive prefix contains illegal components'))
62 raise error.Abort(_('archive prefix contains illegal components'))
63 return prefix
63 return prefix
64
64
65 exts = {
65 exts = {
66 'tar': ['.tar'],
66 'tar': ['.tar'],
67 'tbz2': ['.tbz2', '.tar.bz2'],
67 'tbz2': ['.tbz2', '.tar.bz2'],
68 'tgz': ['.tgz', '.tar.gz'],
68 'tgz': ['.tgz', '.tar.gz'],
69 'zip': ['.zip'],
69 'zip': ['.zip'],
70 }
70 }
71
71
72 def guesskind(dest):
72 def guesskind(dest):
73 for kind, extensions in exts.iteritems():
73 for kind, extensions in exts.iteritems():
74 if any(dest.endswith(ext) for ext in extensions):
74 if any(dest.endswith(ext) for ext in extensions):
75 return kind
75 return kind
76 return None
76 return None
77
77
78 def _rootctx(repo):
78 def _rootctx(repo):
79 # repo[0] may be hidden
79 # repo[0] may be hidden
80 for rev in repo:
80 for rev in repo:
81 return repo[rev]
81 return repo[rev]
82 return repo[nullrev]
82 return repo[nullrev]
83
83
84 # {tags} on ctx includes local tags and 'tip', with no current way to limit
84 # {tags} on ctx includes local tags and 'tip', with no current way to limit
85 # that to global tags. Therefore, use {latesttag} as a substitute when
85 # that to global tags. Therefore, use {latesttag} as a substitute when
86 # the distance is 0, since that will be the list of global tags on ctx.
86 # the distance is 0, since that will be the list of global tags on ctx.
87 _defaultmetatemplate = br'''
87 _defaultmetatemplate = br'''
88 repo: {root}
88 repo: {root}
89 node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
89 node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
90 branch: {branch|utf8}
90 branch: {branch|utf8}
91 {ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
91 {ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
92 separate("\n",
92 separate("\n",
93 join(latesttag % "latesttag: {tag}", "\n"),
93 join(latesttag % "latesttag: {tag}", "\n"),
94 "latesttagdistance: {latesttagdistance}",
94 "latesttagdistance: {latesttagdistance}",
95 "changessincelatesttag: {changessincelatesttag}"))}
95 "changessincelatesttag: {changessincelatesttag}"))}
96 '''[1:] # drop leading '\n'
96 '''[1:] # drop leading '\n'
97
97
98 def buildmetadata(ctx):
98 def buildmetadata(ctx):
99 '''build content of .hg_archival.txt'''
99 '''build content of .hg_archival.txt'''
100 repo = ctx.repo()
100 repo = ctx.repo()
101
101
102 opts = {
102 opts = {
103 'template': repo.ui.config('experimental', 'archivemetatemplate',
103 'template': repo.ui.config('experimental', 'archivemetatemplate',
104 _defaultmetatemplate)
104 _defaultmetatemplate)
105 }
105 }
106
106
107 out = util.stringio()
107 out = util.stringio()
108
108
109 fm = formatter.formatter(repo.ui, out, 'archive', opts)
109 fm = formatter.formatter(repo.ui, out, 'archive', opts)
110 fm.startitem()
110 fm.startitem()
111 fm.context(ctx=ctx)
111 fm.context(ctx=ctx)
112 fm.data(root=_rootctx(repo).hex())
112 fm.data(root=_rootctx(repo).hex())
113
113
114 if ctx.rev() is None:
114 if ctx.rev() is None:
115 dirty = ''
115 dirty = ''
116 if ctx.dirty(missing=True):
116 if ctx.dirty(missing=True):
117 dirty = '+'
117 dirty = '+'
118 fm.data(dirty=dirty)
118 fm.data(dirty=dirty)
119 fm.end()
119 fm.end()
120
120
121 return out.getvalue()
121 return out.getvalue()
122
122
123 class tarit(object):
123 class tarit(object):
124 '''write archive to tar file or stream. can write uncompressed,
124 '''write archive to tar file or stream. can write uncompressed,
125 or compress with gzip or bzip2.'''
125 or compress with gzip or bzip2.'''
126
126
127 class GzipFileWithTime(gzip.GzipFile):
127 class GzipFileWithTime(gzip.GzipFile):
128
128
129 def __init__(self, *args, **kw):
129 def __init__(self, *args, **kw):
130 timestamp = None
130 timestamp = None
131 if r'timestamp' in kw:
131 if r'timestamp' in kw:
132 timestamp = kw.pop(r'timestamp')
132 timestamp = kw.pop(r'timestamp')
133 if timestamp is None:
133 if timestamp is None:
134 self.timestamp = time.time()
134 self.timestamp = time.time()
135 else:
135 else:
136 self.timestamp = timestamp
136 self.timestamp = timestamp
137 gzip.GzipFile.__init__(self, *args, **kw)
137 gzip.GzipFile.__init__(self, *args, **kw)
138
138
139 def _write_gzip_header(self):
139 def _write_gzip_header(self):
140 self.fileobj.write('\037\213') # magic header
140 self.fileobj.write('\037\213') # magic header
141 self.fileobj.write('\010') # compression method
141 self.fileobj.write('\010') # compression method
142 fname = self.name
142 fname = self.name
143 if fname and fname.endswith('.gz'):
143 if fname and fname.endswith('.gz'):
144 fname = fname[:-3]
144 fname = fname[:-3]
145 flags = 0
145 flags = 0
146 if fname:
146 if fname:
147 flags = gzip.FNAME
147 flags = gzip.FNAME
148 self.fileobj.write(pycompat.bytechr(flags))
148 self.fileobj.write(pycompat.bytechr(flags))
149 gzip.write32u(self.fileobj, int(self.timestamp))
149 gzip.write32u(self.fileobj, int(self.timestamp))
150 self.fileobj.write('\002')
150 self.fileobj.write('\002')
151 self.fileobj.write('\377')
151 self.fileobj.write('\377')
152 if fname:
152 if fname:
153 self.fileobj.write(fname + '\000')
153 self.fileobj.write(fname + '\000')
154
154
155 def __init__(self, dest, mtime, kind=''):
155 def __init__(self, dest, mtime, kind=''):
156 self.mtime = mtime
156 self.mtime = mtime
157 self.fileobj = None
157 self.fileobj = None
158
158
159 def taropen(mode, name='', fileobj=None):
159 def taropen(mode, name='', fileobj=None):
160 if kind == 'gz':
160 if kind == 'gz':
161 mode = mode[0:1]
161 mode = mode[0:1]
162 if not fileobj:
162 if not fileobj:
163 fileobj = open(name, mode + 'b')
163 fileobj = open(name, mode + 'b')
164 gzfileobj = self.GzipFileWithTime(name,
164 gzfileobj = self.GzipFileWithTime(name,
165 pycompat.sysstr(mode + 'b'),
165 pycompat.sysstr(mode + 'b'),
166 zlib.Z_BEST_COMPRESSION,
166 zlib.Z_BEST_COMPRESSION,
167 fileobj, timestamp=mtime)
167 fileobj, timestamp=mtime)
168 self.fileobj = gzfileobj
168 self.fileobj = gzfileobj
169 return tarfile.TarFile.taropen(
169 return tarfile.TarFile.taropen(
170 name, pycompat.sysstr(mode), gzfileobj)
170 name, pycompat.sysstr(mode), gzfileobj)
171 else:
171 else:
172 return tarfile.open(
172 return tarfile.open(
173 name, pycompat.sysstr(mode + kind), fileobj)
173 name, pycompat.sysstr(mode + kind), fileobj)
174
174
175 if isinstance(dest, bytes):
175 if isinstance(dest, bytes):
176 self.z = taropen('w:', name=dest)
176 self.z = taropen('w:', name=dest)
177 else:
177 else:
178 self.z = taropen('w|', fileobj=dest)
178 self.z = taropen('w|', fileobj=dest)
179
179
180 def addfile(self, name, mode, islink, data):
180 def addfile(self, name, mode, islink, data):
181 name = pycompat.fsdecode(name)
181 name = pycompat.fsdecode(name)
182 i = tarfile.TarInfo(name)
182 i = tarfile.TarInfo(name)
183 i.mtime = self.mtime
183 i.mtime = self.mtime
184 i.size = len(data)
184 i.size = len(data)
185 if islink:
185 if islink:
186 i.type = tarfile.SYMTYPE
186 i.type = tarfile.SYMTYPE
187 i.mode = 0o777
187 i.mode = 0o777
188 i.linkname = pycompat.fsdecode(data)
188 i.linkname = pycompat.fsdecode(data)
189 data = None
189 data = None
190 i.size = 0
190 i.size = 0
191 else:
191 else:
192 i.mode = mode
192 i.mode = mode
193 data = stringio(data)
193 data = stringio(data)
194 self.z.addfile(i, data)
194 self.z.addfile(i, data)
195
195
196 def done(self):
196 def done(self):
197 self.z.close()
197 self.z.close()
198 if self.fileobj:
198 if self.fileobj:
199 self.fileobj.close()
199 self.fileobj.close()
200
200
201 class zipit(object):
201 class zipit(object):
202 '''write archive to zip file or stream. can write uncompressed,
202 '''write archive to zip file or stream. can write uncompressed,
203 or compressed with deflate.'''
203 or compressed with deflate.'''
204
204
205 def __init__(self, dest, mtime, compress=True):
205 def __init__(self, dest, mtime, compress=True):
206 if isinstance(dest, bytes):
206 if isinstance(dest, bytes):
207 dest = pycompat.fsdecode(dest)
207 dest = pycompat.fsdecode(dest)
208 self.z = zipfile.ZipFile(dest, r'w',
208 self.z = zipfile.ZipFile(dest, r'w',
209 compress and zipfile.ZIP_DEFLATED or
209 compress and zipfile.ZIP_DEFLATED or
210 zipfile.ZIP_STORED)
210 zipfile.ZIP_STORED)
211
211
212 # Python's zipfile module emits deprecation warnings if we try
212 # Python's zipfile module emits deprecation warnings if we try
213 # to store files with a date before 1980.
213 # to store files with a date before 1980.
214 epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
214 epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
215 if mtime < epoch:
215 if mtime < epoch:
216 mtime = epoch
216 mtime = epoch
217
217
218 self.mtime = mtime
218 self.mtime = mtime
219 self.date_time = time.gmtime(mtime)[:6]
219 self.date_time = time.gmtime(mtime)[:6]
220
220
221 def addfile(self, name, mode, islink, data):
221 def addfile(self, name, mode, islink, data):
222 i = zipfile.ZipInfo(pycompat.fsdecode(name), self.date_time)
222 i = zipfile.ZipInfo(pycompat.fsdecode(name), self.date_time)
223 i.compress_type = self.z.compression
223 i.compress_type = self.z.compression
224 # unzip will not honor unix file modes unless file creator is
224 # unzip will not honor unix file modes unless file creator is
225 # set to unix (id 3).
225 # set to unix (id 3).
226 i.create_system = 3
226 i.create_system = 3
227 ftype = _UNX_IFREG
227 ftype = _UNX_IFREG
228 if islink:
228 if islink:
229 mode = 0o777
229 mode = 0o777
230 ftype = _UNX_IFLNK
230 ftype = _UNX_IFLNK
231 i.external_attr = (mode | ftype) << 16
231 i.external_attr = (mode | ftype) << 16
232 # add "extended-timestamp" extra block, because zip archives
232 # add "extended-timestamp" extra block, because zip archives
233 # without this will be extracted with unexpected timestamp,
233 # without this will be extracted with unexpected timestamp,
234 # if TZ is not configured as GMT
234 # if TZ is not configured as GMT
235 i.extra += struct.pack('<hhBl',
235 i.extra += struct.pack('<hhBl',
236 0x5455, # block type: "extended-timestamp"
236 0x5455, # block type: "extended-timestamp"
237 1 + 4, # size of this block
237 1 + 4, # size of this block
238 1, # "modification time is present"
238 1, # "modification time is present"
239 int(self.mtime)) # last modification (UTC)
239 int(self.mtime)) # last modification (UTC)
240 self.z.writestr(i, data)
240 self.z.writestr(i, data)
241
241
242 def done(self):
242 def done(self):
243 self.z.close()
243 self.z.close()
244
244
245 class fileit(object):
245 class fileit(object):
246 '''write archive as files in directory.'''
246 '''write archive as files in directory.'''
247
247
248 def __init__(self, name, mtime):
248 def __init__(self, name, mtime):
249 self.basedir = name
249 self.basedir = name
250 self.opener = vfsmod.vfs(self.basedir)
250 self.opener = vfsmod.vfs(self.basedir)
251 self.mtime = mtime
251 self.mtime = mtime
252
252
253 def addfile(self, name, mode, islink, data):
253 def addfile(self, name, mode, islink, data):
254 if islink:
254 if islink:
255 self.opener.symlink(data, name)
255 self.opener.symlink(data, name)
256 return
256 return
257 f = self.opener(name, "w", atomictemp=False)
257 f = self.opener(name, "w", atomictemp=False)
258 f.write(data)
258 f.write(data)
259 f.close()
259 f.close()
260 destfile = os.path.join(self.basedir, name)
260 destfile = os.path.join(self.basedir, name)
261 os.chmod(destfile, mode)
261 os.chmod(destfile, mode)
262 if self.mtime is not None:
262 if self.mtime is not None:
263 os.utime(destfile, (self.mtime, self.mtime))
263 os.utime(destfile, (self.mtime, self.mtime))
264
264
265 def done(self):
265 def done(self):
266 pass
266 pass
267
267
268 archivers = {
268 archivers = {
269 'files': fileit,
269 'files': fileit,
270 'tar': tarit,
270 'tar': tarit,
271 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
271 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
272 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
272 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
273 'uzip': lambda name, mtime: zipit(name, mtime, False),
273 'uzip': lambda name, mtime: zipit(name, mtime, False),
274 'zip': zipit,
274 'zip': zipit,
275 }
275 }
276
276
277 def archive(repo, dest, node, kind, decode=True, matchfn=None,
277 def archive(repo, dest, node, kind, decode=True, match=None,
278 prefix='', mtime=None, subrepos=False):
278 prefix='', mtime=None, subrepos=False):
279 '''create archive of repo as it was at node.
279 '''create archive of repo as it was at node.
280
280
281 dest can be name of directory, name of archive file, or file
281 dest can be name of directory, name of archive file, or file
282 object to write archive to.
282 object to write archive to.
283
283
284 kind is type of archive to create.
284 kind is type of archive to create.
285
285
286 decode tells whether to put files through decode filters from
286 decode tells whether to put files through decode filters from
287 hgrc.
287 hgrc.
288
288
289 matchfn is function to filter names of files to write to archive.
289 match is a matcher to filter names of files to write to archive.
290
290
291 prefix is name of path to put before every archive member.
291 prefix is name of path to put before every archive member.
292
292
293 mtime is the modified time, in seconds, or None to use the changeset time.
293 mtime is the modified time, in seconds, or None to use the changeset time.
294
294
295 subrepos tells whether to include subrepos.
295 subrepos tells whether to include subrepos.
296 '''
296 '''
297
297
298 if kind == 'files':
298 if kind == 'files':
299 if prefix:
299 if prefix:
300 raise error.Abort(_('cannot give prefix when archiving to files'))
300 raise error.Abort(_('cannot give prefix when archiving to files'))
301 else:
301 else:
302 prefix = tidyprefix(dest, kind, prefix)
302 prefix = tidyprefix(dest, kind, prefix)
303
303
304 def write(name, mode, islink, getdata):
304 def write(name, mode, islink, getdata):
305 data = getdata()
305 data = getdata()
306 if decode:
306 if decode:
307 data = repo.wwritedata(name, data)
307 data = repo.wwritedata(name, data)
308 archiver.addfile(prefix + name, mode, islink, data)
308 archiver.addfile(prefix + name, mode, islink, data)
309
309
310 if kind not in archivers:
310 if kind not in archivers:
311 raise error.Abort(_("unknown archive type '%s'") % kind)
311 raise error.Abort(_("unknown archive type '%s'") % kind)
312
312
313 ctx = repo[node]
313 ctx = repo[node]
314 archiver = archivers[kind](dest, mtime or ctx.date()[0])
314 archiver = archivers[kind](dest, mtime or ctx.date()[0])
315
315
316 if repo.ui.configbool("ui", "archivemeta"):
316 if repo.ui.configbool("ui", "archivemeta"):
317 name = '.hg_archival.txt'
317 name = '.hg_archival.txt'
318 if not matchfn or matchfn(name):
318 if not match or match(name):
319 write(name, 0o644, False, lambda: buildmetadata(ctx))
319 write(name, 0o644, False, lambda: buildmetadata(ctx))
320
320
321 if matchfn:
321 if match:
322 files = [f for f in ctx.manifest().keys() if matchfn(f)]
322 files = [f for f in ctx.manifest().keys() if match(f)]
323 else:
323 else:
324 files = ctx.manifest().keys()
324 files = ctx.manifest().keys()
325 total = len(files)
325 total = len(files)
326 if total:
326 if total:
327 files.sort()
327 files.sort()
328 scmutil.prefetchfiles(repo, [ctx.rev()],
328 scmutil.prefetchfiles(repo, [ctx.rev()],
329 scmutil.matchfiles(repo, files))
329 scmutil.matchfiles(repo, files))
330 progress = scmutil.progress(repo.ui, _('archiving'), unit=_('files'),
330 progress = scmutil.progress(repo.ui, _('archiving'), unit=_('files'),
331 total=total)
331 total=total)
332 progress.update(0)
332 progress.update(0)
333 for f in files:
333 for f in files:
334 ff = ctx.flags(f)
334 ff = ctx.flags(f)
335 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data)
335 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data)
336 progress.increment(item=f)
336 progress.increment(item=f)
337 progress.complete()
337 progress.complete()
338
338
339 if subrepos:
339 if subrepos:
340 for subpath in sorted(ctx.substate):
340 for subpath in sorted(ctx.substate):
341 sub = ctx.workingsub(subpath)
341 sub = ctx.workingsub(subpath)
342 submatch = matchmod.subdirmatcher(subpath, matchfn)
342 submatch = matchmod.subdirmatcher(subpath, match)
343 total += sub.archive(archiver, prefix, submatch, decode)
343 total += sub.archive(archiver, prefix, submatch, decode)
344
344
345 if total == 0:
345 if total == 0:
346 raise error.Abort(_('no files match the archive pattern'))
346 raise error.Abort(_('no files match the archive pattern'))
347
347
348 archiver.done()
348 archiver.done()
349 return total
349 return total
@@ -1,1485 +1,1484 b''
1 #
1 #
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import copy
10 import copy
11 import mimetypes
11 import mimetypes
12 import os
12 import os
13 import re
13 import re
14
14
15 from ..i18n import _
15 from ..i18n import _
16 from ..node import hex, short
16 from ..node import hex, short
17
17
18 from .common import (
18 from .common import (
19 ErrorResponse,
19 ErrorResponse,
20 HTTP_FORBIDDEN,
20 HTTP_FORBIDDEN,
21 HTTP_NOT_FOUND,
21 HTTP_NOT_FOUND,
22 get_contact,
22 get_contact,
23 paritygen,
23 paritygen,
24 staticfile,
24 staticfile,
25 )
25 )
26
26
27 from .. import (
27 from .. import (
28 archival,
28 archival,
29 dagop,
29 dagop,
30 encoding,
30 encoding,
31 error,
31 error,
32 graphmod,
32 graphmod,
33 pycompat,
33 pycompat,
34 revset,
34 revset,
35 revsetlang,
35 revsetlang,
36 scmutil,
36 scmutil,
37 smartset,
37 smartset,
38 templater,
38 templater,
39 templateutil,
39 templateutil,
40 )
40 )
41
41
42 from ..utils import (
42 from ..utils import (
43 stringutil,
43 stringutil,
44 )
44 )
45
45
46 from . import (
46 from . import (
47 webutil,
47 webutil,
48 )
48 )
49
49
50 __all__ = []
50 __all__ = []
51 commands = {}
51 commands = {}
52
52
53 class webcommand(object):
53 class webcommand(object):
54 """Decorator used to register a web command handler.
54 """Decorator used to register a web command handler.
55
55
56 The decorator takes as its positional arguments the name/path the
56 The decorator takes as its positional arguments the name/path the
57 command should be accessible under.
57 command should be accessible under.
58
58
59 When called, functions receive as arguments a ``requestcontext``,
59 When called, functions receive as arguments a ``requestcontext``,
60 ``wsgirequest``, and a templater instance for generatoring output.
60 ``wsgirequest``, and a templater instance for generatoring output.
61 The functions should populate the ``rctx.res`` object with details
61 The functions should populate the ``rctx.res`` object with details
62 about the HTTP response.
62 about the HTTP response.
63
63
64 The function returns a generator to be consumed by the WSGI application.
64 The function returns a generator to be consumed by the WSGI application.
65 For most commands, this should be the result from
65 For most commands, this should be the result from
66 ``web.res.sendresponse()``. Many commands will call ``web.sendtemplate()``
66 ``web.res.sendresponse()``. Many commands will call ``web.sendtemplate()``
67 to render a template.
67 to render a template.
68
68
69 Usage:
69 Usage:
70
70
71 @webcommand('mycommand')
71 @webcommand('mycommand')
72 def mycommand(web):
72 def mycommand(web):
73 pass
73 pass
74 """
74 """
75
75
76 def __init__(self, name):
76 def __init__(self, name):
77 self.name = name
77 self.name = name
78
78
79 def __call__(self, func):
79 def __call__(self, func):
80 __all__.append(self.name)
80 __all__.append(self.name)
81 commands[self.name] = func
81 commands[self.name] = func
82 return func
82 return func
83
83
84 @webcommand('log')
84 @webcommand('log')
85 def log(web):
85 def log(web):
86 """
86 """
87 /log[/{revision}[/{path}]]
87 /log[/{revision}[/{path}]]
88 --------------------------
88 --------------------------
89
89
90 Show repository or file history.
90 Show repository or file history.
91
91
92 For URLs of the form ``/log/{revision}``, a list of changesets starting at
92 For URLs of the form ``/log/{revision}``, a list of changesets starting at
93 the specified changeset identifier is shown. If ``{revision}`` is not
93 the specified changeset identifier is shown. If ``{revision}`` is not
94 defined, the default is ``tip``. This form is equivalent to the
94 defined, the default is ``tip``. This form is equivalent to the
95 ``changelog`` handler.
95 ``changelog`` handler.
96
96
97 For URLs of the form ``/log/{revision}/{file}``, the history for a specific
97 For URLs of the form ``/log/{revision}/{file}``, the history for a specific
98 file will be shown. This form is equivalent to the ``filelog`` handler.
98 file will be shown. This form is equivalent to the ``filelog`` handler.
99 """
99 """
100
100
101 if web.req.qsparams.get('file'):
101 if web.req.qsparams.get('file'):
102 return filelog(web)
102 return filelog(web)
103 else:
103 else:
104 return changelog(web)
104 return changelog(web)
105
105
@webcommand('rawfile')
def rawfile(web):
    # Serve a file's raw bytes, optionally guessing its MIME type from
    # the file name and contents.
    guessmime = web.configbool('web', 'guessmime')

    path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
    if not path:
        return manifest(web)

    try:
        fctx = webutil.filectx(web.repo, web.req)
    except error.LookupError as inst:
        try:
            # fall back to a directory listing; re-raise the original
            # lookup error if that also fails
            return manifest(web)
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    mt = 'application/binary'
    if guessmime:
        guessed = mimetypes.guess_type(pycompat.fsdecode(path))[0]
        if guessed is None:
            # no extension match: sniff the payload instead
            mt = ('application/binary' if stringutil.binary(text)
                  else 'text/plain')
        else:
            mt = pycompat.sysbytes(guessed)

    if mt.startswith('text/'):
        mt += '; charset="%s"' % encoding.encoding

    web.res.headers['Content-Type'] = mt
    # escape backslashes and double quotes so the basename is safe
    # inside the quoted Content-Disposition parameter
    filename = (path.rpartition('/')[-1]
                .replace('\\', '\\\\').replace('"', '\\"'))
    web.res.headers['Content-Disposition'] = 'inline; filename="%s"' % filename
    web.res.setbodybytes(text)
    return web.res.sendresponse()
144
144
def _filerevision(web, fctx):
    # Render the 'filerevision' template for a single file revision.
    fname = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)
    ishead = fctx.filenode() in fctx.filelog().heads()

    if stringutil.binary(text):
        # binary payloads are replaced with a short placeholder rather
        # than rendered inline
        mt = pycompat.sysbytes(
            mimetypes.guess_type(pycompat.fsdecode(fname))[0]
            or r'application/octet-stream')
        text = '(binary:%s)' % mt

    def lines(context):
        # one mapping per source line, numbered from 1
        for lineno, t in enumerate(text.splitlines(True)):
            num = lineno + 1
            yield {"line": t,
                   "lineid": "l%d" % num,
                   "linenumber": "% 6d" % num,
                   "parity": next(parity)}

    return web.sendtemplate(
        'filerevision',
        file=fname,
        path=webutil.up(fname),
        text=templateutil.mappinggenerator(lines),
        symrev=webutil.symrevorshortnode(web.req, fctx),
        rename=webutil.renamelink(fctx),
        permissions=fctx.manifest().flags(fname),
        ishead=int(ishead),
        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
173 **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
174
174
@webcommand('file')
def file(web):
    """
    /file/{revision}[/{path}]
    -------------------------

    Show information about a directory or file in the repository.

    Info about the ``path`` given as a URL parameter will be rendered.

    If ``path`` is a directory, information about the entries in that
    directory will be rendered. This form is equivalent to the ``manifest``
    handler.

    If ``path`` is a file, information about that file will be shown via
    the ``filerevision`` template.

    If ``path`` is not defined, information about the root directory will
    be rendered.
    """
    # 'raw' style delegates straight to the rawfile handler
    if web.req.qsparams.get('style') == 'raw':
        return rawfile(web)

    path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
    if not path:
        return manifest(web)
    try:
        return _filerevision(web, webutil.filectx(web.repo, web.req))
    except error.LookupError as inst:
        # not a file: try rendering it as a directory, but surface the
        # original lookup error if that fails too
        try:
            return manifest(web)
        except ErrorResponse:
            raise inst
208
208
def _search(web):
    # Backend for the changelog search box: decide whether the query is
    # an exact revision, a revset expression, or plain keywords, then
    # render matching changesets with the 'search' template.
    MODE_REVISION = 'rev'
    MODE_KEYWORD = 'keyword'
    MODE_REVSET = 'revset'

    def revsearch(ctx):
        # exact revision: a single-element result
        yield ctx

    def keywordsearch(query):
        lower = encoding.lower
        qw = lower(query).split()

        def revgen():
            # walk the changelog newest-first, in windows of 100 revs
            cl = web.repo.changelog
            for i in pycompat.xrange(len(web.repo) - 1, 0, -100):
                window = [web.repo[j] for j in cl.revs(max(0, i - 99), i)]
                for e in reversed(window):
                    yield e

        for ctx in revgen():
            # every query word must occur in the user, description, or
            # file list (case-insensitively)
            haystacks = (lower(ctx.user()),
                         lower(ctx.description()),
                         lower(" ".join(ctx.files())))
            if all(any(q in h for h in haystacks) for q in qw):
                yield ctx

    def revsetsearch(revs):
        for r in revs:
            yield web.repo[r]

    searchfuncs = {
        MODE_REVISION: (revsearch, 'exact revision search'),
        MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
        MODE_REVSET: (revsetsearch, 'revset expression search'),
    }

    def getsearchmode(query):
        # Returns (mode, argument-for-that-mode's-search-function).
        try:
            ctx = scmutil.revsymbol(web.repo, query)
        except (error.RepoError, error.LookupError):
            # query is not an exact revision pointer, need to
            # decide if it's a revset expression or keywords
            pass
        else:
            return MODE_REVISION, ctx

        revdef = 'reverse(%s)' % query
        try:
            tree = revsetlang.parse(revdef)
        except error.ParseError:
            # can't parse to a revset tree
            return MODE_KEYWORD, query

        if revsetlang.depth(tree) <= 2:
            # no revset syntax used
            return MODE_KEYWORD, query

        # regex patterns are not allowed in web searches
        if any((token, (value or '')[:3]) == ('string', 're:')
               for token, value, pos in revsetlang.tokenize(revdef)):
            return MODE_KEYWORD, query

        funcsused = revsetlang.funcsused(tree)
        if not funcsused.issubset(revset.safesymbols):
            return MODE_KEYWORD, query

        try:
            mfunc = revset.match(web.repo.ui, revdef,
                                 lookup=revset.lookupfn(web.repo))
            revs = mfunc(web.repo)
            return MODE_REVSET, revs
            # ParseError: wrongly placed tokens, wrongs arguments, etc
            # RepoLookupError: no such revision, e.g. in 'revision:'
            # Abort: bookmark/tag not exists
            # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
        except (error.ParseError, error.RepoLookupError, error.Abort,
                LookupError):
            return MODE_KEYWORD, query

    def changelist(context):
        # Yield up to revcount template mappings for matching changesets.
        count = 0

        for ctx in searchfunc[0](funcarg):
            count += 1
            n = scmutil.binnode(ctx)
            showtags = webutil.showtag(web.repo, 'changelogtag', n)
            files = webutil.listfilediffs(ctx.files(), n, web.maxfiles)

            lm = webutil.commonentry(web.repo, ctx)
            lm.update({
                'parity': next(parity),
                'changelogtag': showtags,
                'files': files,
            })
            yield lm

            if count >= revcount:
                break

    query = web.req.qsparams['rev']
    revcount = web.maxchanges
    if 'revcount' in web.req.qsparams:
        try:
            revcount = int(web.req.qsparams.get('revcount', revcount))
            revcount = max(revcount, 1)
            web.tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            # ignore a malformed revcount parameter
            pass

    # navigation variables for the 'less'/'more' links
    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount // 2, 1)
    lessvars['rev'] = query
    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2
    morevars['rev'] = query

    mode, funcarg = getsearchmode(query)

    if 'forcekw' in web.req.qsparams:
        # user explicitly requested keyword search
        showforcekw = ''
        showunforcekw = searchfuncs[mode][1]
        mode = MODE_KEYWORD
        funcarg = query
    else:
        if mode != MODE_KEYWORD:
            showforcekw = searchfuncs[MODE_KEYWORD][1]
        else:
            showforcekw = ''
        showunforcekw = ''

    searchfunc = searchfuncs[mode]

    tip = web.repo['tip']
    parity = paritygen(web.stripecount)

    return web.sendtemplate(
        'search',
        query=query,
        node=tip.hex(),
        symrev='tip',
        entries=templateutil.mappinggenerator(changelist, name='searchentry'),
        archives=web.archivelist('tip'),
        morevars=morevars,
        lessvars=lessvars,
        modedesc=searchfunc[1],
        showforcekw=showforcekw,
        showunforcekw=showunforcekw)
365
365
@webcommand('changelog')
def changelog(web, shortlog=False):
    """
    /changelog[/{revision}]
    -----------------------

    Show information about multiple changesets.

    If the optional ``revision`` URL argument is absent, information about
    all changesets starting at ``tip`` will be rendered. If the ``revision``
    argument is present, changesets will be shown starting from the specified
    revision.

    If ``revision`` is absent, the ``rev`` query string argument may be
    defined. This will perform a search for changesets.

    The argument for ``rev`` can be a single revision, a revision set,
    or a literal keyword to search for in changeset data (equivalent to
    :hg:`log -k`).

    The ``revcount`` query string argument defines the maximum numbers of
    changesets to render.

    For non-searches, the ``changelog`` template will be rendered.
    """

    query = ''
    if 'node' in web.req.qsparams:
        ctx = webutil.changectx(web.repo, web.req)
        symrev = webutil.symrevorshortnode(web.req, ctx)
    elif 'rev' in web.req.qsparams:
        # a 'rev' query parameter (without 'node') means a search
        return _search(web)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'

    def changelist(maxcount):
        # Yield template mappings for changesets from 'pos' backwards.
        revs = []
        if pos != -1:
            revs = web.repo.changelog.revs(pos, 0)

        for entry in webutil.changelistentries(web, revs, maxcount, parity):
            yield entry

    revcount = web.maxshortchanges if shortlog else web.maxchanges

    if 'revcount' in web.req.qsparams:
        try:
            revcount = int(web.req.qsparams.get('revcount', revcount))
            revcount = max(revcount, 1)
            web.tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            # ignore a malformed revcount parameter
            pass

    # navigation variables for the 'less'/'more' links
    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount // 2, 1)
    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = len(web.repo)
    pos = ctx.rev()
    parity = paritygen(web.stripecount)

    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)

    # fetch one extra entry so we know whether a next page exists
    entries = list(changelist(revcount + 1))
    latestentry = entries[:1]
    if len(entries) > revcount:
        nextentry = entries[-1:]
        entries = entries[:-1]
    else:
        nextentry = []

    return web.sendtemplate(
        'shortlog' if shortlog else 'changelog',
        changenav=changenav,
        node=ctx.hex(),
        rev=pos,
        symrev=symrev,
        changesets=count,
        entries=templateutil.mappinglist(entries),
        latestentry=templateutil.mappinglist(latestentry),
        nextentry=templateutil.mappinglist(nextentry),
        archives=web.archivelist('tip'),
        revcount=revcount,
        morevars=morevars,
        lessvars=lessvars,
        query=query)
457
457
@webcommand('shortlog')
def shortlog(web):
    """
    /shortlog
    ---------

    Show basic information about a set of changesets.

    This accepts the same parameters as the ``changelog`` handler. The only
    difference is the ``shortlog`` template will be rendered instead of the
    ``changelog`` template.
    """
    # thin wrapper: same logic as changelog, different template
    return changelog(web, shortlog=True)
471
471
@webcommand('changeset')
def changeset(web):
    """
    /changeset[/{revision}]
    -----------------------

    Show information about a single changeset.

    A URL path argument is the changeset identifier to show. See ``hg help
    revisions`` for possible values. If not defined, the ``tip`` changeset
    will be shown.

    The ``changeset`` template is rendered. Contents of the ``changesettag``,
    ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many
    templates related to diffs may all be used to produce the output.
    """
    ctx = webutil.changectx(web.repo, web.req)

    return web.sendtemplate(
        'changeset',
        **webutil.changesetentry(web, ctx))

# '/rev/...' is an alias for '/changeset/...'
rev = webcommand('rev')(changeset)
495
495
def decodepath(path):
    """Hook for mapping a path in the repository to a path in the
    working copy.

    Extensions (e.g., largefiles) can override this to remap files in
    the virtual file system presented by the manifest command below."""
    # identity mapping by default; extensions monkeypatch this function
    return path
503
503
@webcommand('manifest')
def manifest(web):
    """
    /manifest[/{revision}[/{path}]]
    -------------------------------

    Show information about a directory.

    If the URL path arguments are omitted, information about the root
    directory for the ``tip`` changeset will be shown.

    Because this handler can only show information for directories, it
    is recommended to use the ``file`` handler instead, as it can handle both
    directories and files.

    The ``manifest`` template will be rendered for this handler.
    """
    if 'node' in web.req.qsparams:
        ctx = webutil.changectx(web.repo, web.req)
        symrev = webutil.symrevorshortnode(web.req, ctx)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'
    path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
    mf = ctx.manifest()
    node = scmutil.binnode(ctx)

    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    # normalize to a trailing-slash directory prefix
    if path and path[-1:] != "/":
        path += "/"
    plen = len(path)
    abspath = "/" + path

    for full, n in mf.iteritems():
        # the virtual path (working copy path) used for the full
        # (repository) path
        f = decodepath(full)

        if f[:plen] != path:
            continue
        remain = f[plen:]
        elements = remain.split('/')
        if len(elements) == 1:
            # direct child file of this directory
            files[remain] = full
        else:
            # record the subdirectory chain in the nested dirs dict
            h = dirs # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                if len(h) > 1:
                    break
            h[None] = None # denotes files present

    if mf and not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(context):
        # one mapping per file, sorted by basename
        for f in sorted(files):
            full = files[f]

            fctx = ctx.filectx(full)
            yield {"file": full,
                   "parity": next(parity),
                   "basename": f,
                   "date": fctx.date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full)}

    def dirlist(context):
        # one mapping per subdirectory, collapsing chains of
        # single-entry directories into the 'emptydirs' suffix
        for d in sorted(dirs):

            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                k, v = next(iter(h.items()))
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {"parity": next(parity),
                   "path": path,
                   "emptydirs": "/".join(emptydirs),
                   "basename": d}

    return web.sendtemplate(
        'manifest',
        symrev=symrev,
        path=abspath,
        up=webutil.up(abspath),
        upparity=next(parity),
        fentries=templateutil.mappinggenerator(filelist),
        dentries=templateutil.mappinggenerator(dirlist),
        archives=web.archivelist(hex(node)),
        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
603
603
@webcommand('tags')
def tags(web):
    """
    /tags
    -----

    Show information about tags.

    No arguments are accepted.

    The ``tags`` template is rendered.
    """
    i = list(reversed(web.repo.tagslist()))
    parity = paritygen(web.stripecount)

    def entries(context, notip, latestonly):
        # optionally drop the synthetic 'tip' tag and/or keep only the
        # most recent entry
        t = [(k, n) for k, n in i if k != "tip"] if notip else i
        if latestonly:
            t = t[:1]
        for k, n in t:
            yield {"parity": next(parity),
                   "tag": k,
                   "date": web.repo[n].date(),
                   "node": hex(n)}

    return web.sendtemplate(
        'tags',
        node=hex(web.repo.changelog.tip()),
        entries=templateutil.mappinggenerator(entries, args=(False, False)),
        entriesnotip=templateutil.mappinggenerator(entries,
                                                   args=(True, False)),
        latestentry=templateutil.mappinggenerator(entries, args=(True, True)))
638
638
@webcommand('bookmarks')
def bookmarks(web):
    """
    /bookmarks
    ----------

    Show information about bookmarks.

    No arguments are accepted.

    The ``bookmarks`` template is rendered.
    """
    # keep only bookmarks whose target changeset exists, newest first
    i = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
    sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
    i = sorted(i, key=sortkey, reverse=True)
    parity = paritygen(web.stripecount)

    def entries(context, latestonly):
        t = i[:1] if latestonly else i
        for k, n in t:
            yield {"parity": next(parity),
                   "bookmark": k,
                   "date": web.repo[n].date(),
                   "node": hex(n)}

    # -1 (nullrev) when there are no bookmarks at all
    latestrev = i[0][1] if i else -1
    lastdate = web.repo[latestrev].date()

    return web.sendtemplate(
        'bookmarks',
        node=hex(web.repo.changelog.tip()),
        lastchange=templateutil.mappinglist([{'date': lastdate}]),
        entries=templateutil.mappinggenerator(entries, args=(False,)),
        latestentry=templateutil.mappinggenerator(entries, args=(True,)))
678
678
@webcommand('branches')
def branches(web):
    """
    /branches
    ---------

    Show information about branches.

    All known branches are contained in the output, even closed branches.

    No arguments are accepted.

    The ``branches`` template is rendered.
    """
    # full listing plus a one-entry variant for the 'latest' slot
    latestentry = webutil.branchentries(web.repo, web.stripecount, 1)
    entries = webutil.branchentries(web.repo, web.stripecount)

    return web.sendtemplate(
        'branches',
        node=hex(web.repo.changelog.tip()),
        entries=entries,
        latestentry=latestentry)
701
701
@webcommand('summary')
def summary(web):
    """
    /summary
    --------

    Show a summary of repository state.

    Information about the latest changesets, bookmarks, tags, and branches
    is captured by this handler.

    The ``summary`` template is rendered.
    """
    taglist = reversed(web.repo.tagslist())

    def tagentries(context):
        # Emit up to 10 most recent tags, skipping the implicit "tip" tag.
        parity = paritygen(web.stripecount)
        shown = 0
        for name, node in taglist:
            if name == "tip": # skip tip
                continue

            shown += 1
            if shown > 10: # limit to 10 tags
                break

            yield {
                'parity': next(parity),
                'tag': name,
                'node': hex(node),
                'date': web.repo[node].date(),
            }

    def bookmarks(context):
        # Show the 10 bookmarks whose targets are visible, most recently
        # changed first (by target revision, then name).
        parity = paritygen(web.stripecount)
        marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
        sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
        marks = sorted(marks, key=sortkey, reverse=True)
        for name, node in marks[:10]: # limit to 10 bookmarks
            yield {'parity': next(parity),
                   'bookmark': name,
                   'date': web.repo[node].date(),
                   'node': hex(node)}

    def changelist(context):
        # Build entries oldest-first (cheap appends), then yield them
        # newest-first for display.
        parity = paritygen(web.stripecount, offset=start - end)
        forward = []
        revs = []
        if start < end:
            revs = web.repo.changelog.revs(start, end - 1)
        for rev in revs:
            ctx = web.repo[rev]
            entry = webutil.commonentry(web.repo, ctx)
            entry['parity'] = next(parity)
            forward.append(entry)

        for entry in reversed(forward):
            yield entry

    tip = web.repo['tip']
    count = len(web.repo)
    # Window of the most recent maxchanges revisions for the shortlog.
    start = max(0, count - web.maxchanges)
    end = min(count, start + web.maxchanges)

    desc = web.config("web", "description")
    if not desc:
        desc = 'unknown'
    labels = web.configlist('web', 'labels')

    return web.sendtemplate(
        'summary',
        desc=desc,
        owner=get_contact(web.config) or 'unknown',
        lastchange=tip.date(),
        tags=templateutil.mappinggenerator(tagentries, name='tagentry'),
        bookmarks=templateutil.mappinggenerator(bookmarks),
        branches=webutil.branchentries(web.repo, web.stripecount, 10),
        shortlog=templateutil.mappinggenerator(changelist,
                                               name='shortlogentry'),
        node=tip.hex(),
        symrev='tip',
        archives=web.archivelist('tip'),
        labels=templateutil.hybridlist(labels, name='label'))
785
785
@webcommand('filediff')
def filediff(web):
    """
    /diff/{revision}/{path}
    -----------------------

    Show how a file changed in a particular commit.

    The ``filediff`` template is rendered.

    This handler is registered under both the ``/diff`` and ``/filediff``
    paths. ``/diff`` is used in modern code.
    """
    fctx, ctx = None, None
    try:
        fctx = webutil.filectx(web.repo, web.req)
    except LookupError:
        # Path may name a file removed in this changeset; fall back to the
        # changeset context, but only if the file was actually touched.
        ctx = webutil.changectx(web.repo, web.req)
        path = webutil.cleanpath(web.repo, web.req.qsparams['file'])
        if path not in ctx.files():
            raise

    if fctx is not None:
        path = fctx.path()
        ctx = fctx.changectx()
    basectx = ctx.p1()

    # Per-request style override takes precedence over the config default.
    style = web.req.qsparams.get('style', web.config('web', 'style'))

    diffs = webutil.diffs(web, ctx, basectx, [path], style)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        rename = templateutil.mappinglist([])

    return web.sendtemplate(
        'filediff',
        file=path,
        symrev=webutil.symrevorshortnode(web.req, ctx),
        rename=rename,
        diff=diffs,
        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))

# Register the same handler under the modern ``/diff`` URL as well.
diff = webcommand('diff')(filediff)
834
834
@webcommand('comparison')
def comparison(web):
    """
    /comparison/{revision}/{path}
    -----------------------------

    Show a comparison between the old and new versions of a file from changes
    made on a particular revision.

    This is similar to the ``diff`` handler. However, this form features
    a split or side-by-side diff rather than a unified diff.

    The ``context`` query string argument can be used to control the lines of
    context in the diff.

    The ``filecomparison`` template is rendered.
    """
    ctx = webutil.changectx(web.repo, web.req)
    if 'file' not in web.req.qsparams:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = webutil.cleanpath(web.repo, web.req.qsparams['file'])

    def parsecontext(v):
        # 'full' requests unlimited context, encoded as -1; anything else
        # must be an integer line count.
        return -1 if v == 'full' else int(v)

    if 'context' in web.req.qsparams:
        context = parsecontext(web.req.qsparams['context'])
    else:
        context = parsecontext(web.config('web', 'comparisoncontext', '5'))

    def filelines(f):
        # Binary files are represented by a one-line placeholder instead of
        # raw content.
        if f.isbinary():
            mt = pycompat.sysbytes(
                mimetypes.guess_type(pycompat.fsdecode(f.path()))[0]
                or r'application/octet-stream')
            return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
        return f.data().splitlines()

    fctx = None
    parent = ctx.p1()
    leftrev = parent.rev()
    leftnode = parent.node()
    rightrev = ctx.rev()
    rightnode = scmutil.binnode(ctx)
    if path in ctx:
        # File exists in the requested revision; it may be new (absent in
        # the parent).
        fctx = ctx[path]
        rightlines = filelines(fctx)
        if path not in parent:
            leftlines = ()
        else:
            pfctx = parent[path]
            leftlines = filelines(pfctx)
    else:
        # File was removed by this changeset: only the parent side exists.
        rightlines = ()
        pfctx = ctx.parents()[0][path]
        leftlines = filelines(pfctx)

    comparison = webutil.compare(context, leftlines, rightlines)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        rename = templateutil.mappinglist([])

    return web.sendtemplate(
        'filecomparison',
        file=path,
        symrev=webutil.symrevorshortnode(web.req, ctx),
        rename=rename,
        leftrev=leftrev,
        leftnode=hex(leftnode),
        rightrev=rightrev,
        rightnode=hex(rightnode),
        comparison=comparison,
        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
909
909
@webcommand('annotate')
def annotate(web):
    """
    /annotate/{revision}/{path}
    ---------------------------

    Show changeset information for each line in a file.

    The ``ignorews``, ``ignorewsamount``, ``ignorewseol``, and
    ``ignoreblanklines`` query string arguments have the same meaning as
    their ``[annotate]`` config equivalents. It uses the hgrc boolean
    parsing logic to interpret the value. e.g. ``0`` and ``false`` are
    false and ``1`` and ``true`` are true. If not defined, the server
    default settings are used.

    The ``fileannotate`` template is rendered.
    """
    fctx = webutil.filectx(web.repo, web.req)
    f = fctx.path()
    parity = paritygen(web.stripecount)
    ishead = fctx.filenode() in fctx.filelog().heads()

    # parents() is called once per line and several lines likely belong to
    # same revision. So it is worth caching.
    # TODO there are still redundant operations within basefilectx.parents()
    # and from the fctx.annotate() call itself that could be cached.
    parentscache = {}
    def parents(context, f):
        rev = f.rev()
        if rev not in parentscache:
            parentscache[rev] = [{'node': p.hex(), 'rev': p.rev()}
                                 for p in f.parents()]

        for p in parentscache[rev]:
            yield p

    def annotate(context):
        if fctx.isbinary():
            # Fabricate a single placeholder line for binary files.
            mt = pycompat.sysbytes(
                mimetypes.guess_type(pycompat.fsdecode(fctx.path()))[0]
                or r'application/octet-stream')
            lines = [dagop.annotateline(fctx=fctx.filectx(fctx.filerev()),
                                        lineno=1, text='(binary:%s)' % mt)]
        else:
            lines = webutil.annotate(web.req, fctx, web.repo.ui)

        # Consecutive lines from the same revision form a visual "block";
        # track the previous revision to mark block heads and alternate
        # block parity.
        previousrev = None
        blockparitygen = paritygen(1)
        for lineno, aline in enumerate(lines):
            f = aline.fctx
            rev = f.rev()
            if rev != previousrev:
                blockhead = True
                blockparity = next(blockparitygen)
            else:
                blockhead = None
            previousrev = rev
            yield {"parity": next(parity),
                   "node": f.hex(),
                   "rev": rev,
                   "author": f.user(),
                   "parents": templateutil.mappinggenerator(parents, args=(f,)),
                   "desc": f.description(),
                   "extra": f.extra(),
                   "file": f.path(),
                   "blockhead": blockhead,
                   "blockparity": blockparity,
                   "targetline": aline.lineno,
                   "line": aline.text,
                   "lineno": lineno + 1,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1),
                   "revdate": f.date()}

    # Expose the effective whitespace/diff options to the template as a
    # plain mapping.
    diffopts = webutil.difffeatureopts(web.req, web.repo.ui, 'annotate')
    diffopts = {k: getattr(diffopts, k) for k in diffopts.defaults}

    return web.sendtemplate(
        'fileannotate',
        file=f,
        annotate=templateutil.mappinggenerator(annotate),
        path=webutil.up(f),
        symrev=webutil.symrevorshortnode(web.req, fctx),
        rename=webutil.renamelink(fctx),
        permissions=fctx.manifest().flags(f),
        ishead=int(ishead),
        diffopts=templateutil.hybriddict(diffopts),
        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
1003
1003
@webcommand('filelog')
def filelog(web):
    """
    /filelog/{revision}/{path}
    --------------------------

    Show information about the history of a file in the repository.

    The ``revcount`` query string argument can be defined to control the
    maximum number of entries to show.

    The ``filelog`` template will be rendered.
    """

    try:
        fctx = webutil.filectx(web.repo, web.req)
        f = fctx.path()
        fl = fctx.filelog()
    except error.LookupError:
        # The file is not present in the requested revision. Find the
        # latest filelog entry linked at or before that revision instead.
        f = webutil.cleanpath(web.repo, web.req.qsparams['file'])
        fl = web.repo.file(f)
        numrevs = len(fl)
        if not numrevs: # file doesn't exist at all
            raise
        rev = webutil.changectx(web.repo, web.req).rev()
        first = fl.linkrev(0)
        if rev < first: # current rev is from before file existed
            raise
        frev = numrevs - 1
        while fl.linkrev(frev) > rev:
            frev -= 1
        fctx = web.repo.filectx(f, fl.linkrev(frev))

    # Page size: config default, overridable (min 1) via the query string.
    revcount = web.maxshortchanges
    if 'revcount' in web.req.qsparams:
        try:
            revcount = int(web.req.qsparams.get('revcount', revcount))
            revcount = max(revcount, 1)
            web.tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    lrange = webutil.linerange(web.req)

    # "less"/"more" navigation links halve/double the page size.
    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount // 2, 1)
    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    patch = 'patch' in web.req.qsparams
    if patch:
        lessvars['patch'] = morevars['patch'] = web.req.qsparams['patch']
    descend = 'descend' in web.req.qsparams
    if descend:
        lessvars['descend'] = morevars['descend'] = web.req.qsparams['descend']

    count = fctx.filerev() + 1
    start = max(0, count - revcount) # first rev on this page
    end = min(count, start + revcount) # last rev on this page
    parity = paritygen(web.stripecount, offset=start - end)

    repo = web.repo
    filelog = fctx.filelog()
    # Only keep filelog revisions whose linked changeset is visible.
    revs = [filerev for filerev in filelog.revs(start, end - 1)
            if filelog.linkrev(filerev) in repo]
    entries = []

    diffstyle = web.config('web', 'style')
    if 'style' in web.req.qsparams:
        diffstyle = web.req.qsparams['style']

    def diff(fctx, linerange=None):
        # Render this file's diff against the first parent, with line ids
        # prefixed by the short changeset hash to keep them unique per page.
        ctx = fctx.changectx()
        basectx = ctx.p1()
        path = fctx.path()
        return webutil.diffs(web, ctx, basectx, [path], diffstyle,
                             linerange=linerange,
                             lineidprefix='%s-' % ctx.hex()[:12])

    linerange = None
    if lrange is not None:
        linerange = webutil.formatlinerange(*lrange)
        # deactivate numeric nav links when linerange is specified as this
        # would required a dedicated "revnav" class
        nav = templateutil.mappinglist([])
        if descend:
            it = dagop.blockdescendants(fctx, *lrange)
        else:
            it = dagop.blockancestors(fctx, *lrange)
        for i, (c, lr) in enumerate(it, 1):
            diffs = None
            if patch:
                diffs = diff(c, linerange=lr)
            # follow renames across filtered (not in range) revisions
            path = c.path()
            lm = webutil.commonentry(repo, c)
            lm.update({
                'parity': next(parity),
                'filerev': c.rev(),
                'file': path,
                'diff': diffs,
                'linerange': webutil.formatlinerange(*lr),
                'rename': templateutil.mappinglist([]),
            })
            entries.append(lm)
            if i == revcount:
                break
        lessvars['linerange'] = webutil.formatlinerange(*lrange)
        morevars['linerange'] = lessvars['linerange']
    else:
        for i in revs:
            iterfctx = fctx.filectx(i)
            diffs = None
            if patch:
                diffs = diff(iterfctx)
            lm = webutil.commonentry(repo, iterfctx)
            lm.update({
                'parity': next(parity),
                'filerev': i,
                'file': f,
                'diff': diffs,
                'rename': webutil.renamelink(iterfctx),
            })
            entries.append(lm)
        entries.reverse()
        revnav = webutil.filerevnav(web.repo, fctx.path())
        nav = revnav.gen(end - 1, revcount, count)

    latestentry = entries[:1]

    return web.sendtemplate(
        'filelog',
        file=f,
        nav=nav,
        symrev=webutil.symrevorshortnode(web.req, fctx),
        entries=templateutil.mappinglist(entries),
        descend=descend,
        patch=patch,
        latestentry=templateutil.mappinglist(latestentry),
        linerange=linerange,
        revcount=revcount,
        morevars=morevars,
        lessvars=lessvars,
        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
1148
1148
@webcommand('archive')
def archive(web):
    """
    /archive/{revision}.{format}[/{path}]
    -------------------------------------

    Obtain an archive of repository content.

    The content and type of the archive is defined by a URL path parameter.
    ``format`` is the file extension of the archive type to be generated. e.g.
    ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
    server configuration.

    The optional ``path`` URL parameter controls content to include in the
    archive. If omitted, every file in the specified revision is present in the
    archive. If included, only the specified file or contents of the specified
    directory will be included in the archive.

    No template is used for this handler. Raw, binary content is generated.
    """

    type_ = web.req.qsparams.get('type')
    allowed = web.configlist("web", "allow-archive")
    key = web.req.qsparams['node']

    if type_ not in webutil.archivespecs:
        msg = 'Unsupported archive type: %s' % stringutil.pprint(type_)
        raise ErrorResponse(HTTP_NOT_FOUND, msg)

    # An archive type is permitted either via web.allow-archive or the
    # legacy per-type web.allow<type> boolean.
    if not ((type_ in allowed or
             web.configbool("web", "allow" + type_))):
        msg = 'Archive type not allowed: %s' % type_
        raise ErrorResponse(HTTP_FORBIDDEN, msg)

    # Sanitize the repo name for use in a filename; use the short hash when
    # the key is already a full node or the symbolic "tip".
    reponame = re.sub(br"\W+", "-", os.path.basename(web.reponame))
    cnode = web.repo.lookup(key)
    arch_version = key
    if cnode == key or key == 'tip':
        arch_version = short(cnode)
    name = "%s-%s" % (reponame, arch_version)

    ctx = webutil.changectx(web.repo, web.req)
    pats = []
    match = scmutil.match(ctx, [])
    file = web.req.qsparams.get('file')
    if file:
        pats = ['path:' + file]
        match = scmutil.match(ctx, pats, default='path')
        if pats:
            # Fail early if the pattern matches nothing in the manifest.
            files = [f for f in ctx.manifest().keys() if match(f)]
            if not files:
                raise ErrorResponse(HTTP_NOT_FOUND,
                                    'file(s) not found: %s' % file)

    mimetype, artype, extension, encoding = webutil.archivespecs[type_]

    web.res.headers['Content-Type'] = mimetype
    web.res.headers['Content-Disposition'] = 'attachment; filename=%s%s' % (
        name, extension)

    if encoding:
        web.res.headers['Content-Encoding'] = encoding

    # Headers are sent now; the archive body is streamed afterwards.
    web.res.setbodywillwrite()
    if list(web.res.sendresponse()):
        raise error.ProgrammingError('sendresponse() should not emit data '
                                     'if writing later')

    bodyfh = web.res.getbodyfile()

    # archival.archive() takes a real matcher via the ``match`` keyword
    # (the old ``matchfn`` spelling is gone).
    archival.archive(web.repo, bodyfh, cnode, artype, prefix=name, match=match,
                     subrepos=web.configbool("web", "archivesubrepos"))

    return []
1224
1223
@webcommand('static')
def static(web):
    """Serve a static file (CSS, images, JavaScript) for the web interface.

    The file name comes from the ``file`` query-string parameter. The
    search path is either the explicit ``web.static`` config value or,
    failing that, the ``static`` subdirectory of each template path.
    """
    fname = web.req.qsparams['file']
    # A repo owner may set web.static in .hg/hgrc to serve any file
    # readable by the user running the CGI script.
    searchpaths = web.config("web", "static", untrusted=False)
    if not searchpaths:
        roots = web.templatepath or templater.templatepaths()
        if isinstance(roots, str):
            roots = [roots]
        searchpaths = [os.path.join(root, 'static') for root in roots]

    staticfile(searchpaths, fname, web.res)
    return web.res.sendresponse()
1239
1238
@webcommand('graph')
def graph(web):
    """
    /graph[/{revision}]
    -------------------

    Show information about the graphical topology of the repository.

    Information rendered by this handler can be used to create visual
    representations of repository topology.

    The ``revision`` URL parameter controls the starting changeset. If it's
    absent, the default is ``tip``.

    The ``revcount`` query string argument can define the number of changesets
    to show information for.

    The ``graphtop`` query string argument can specify the starting changeset
    for producing ``jsdata`` variable that is used for rendering graph in
    JavaScript. By default it has the same value as ``revision``.

    This handler will render the ``graph`` template.
    """

    # Resolve the starting changeset: explicit 'node' parameter, else tip.
    if 'node' in web.req.qsparams:
        ctx = webutil.changectx(web.repo, web.req)
        symrev = webutil.symrevorshortnode(web.req, ctx)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'
    rev = ctx.rev()

    # NOTE(review): presumably the pixel height of one graph row in the
    # template's rendering — confirm against the 'graph' template.
    bg_height = 39
    revcount = web.maxshortchanges
    if 'revcount' in web.req.qsparams:
        try:
            revcount = int(web.req.qsparams.get('revcount', revcount))
            # Clamp to at least one changeset per page.
            revcount = max(revcount, 1)
            # Persist the chosen page size in the session variables so
            # subsequent links carry it along.
            web.tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            # Non-numeric revcount: silently keep the default.
            pass

    # Session-variable copies for the "less"/"more" pagination links.
    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount // 2, 1)
    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    # 'graphtop' is the changeset the JavaScript graph data starts from;
    # it defaults to the page's own starting changeset.
    graphtop = web.req.qsparams.get('graphtop', ctx.hex())
    graphvars = copy.copy(web.tmpl.defaults['sessionvars'])
    graphvars['graphtop'] = graphtop

    count = len(web.repo)
    pos = rev

    # Navigation targets, clamped to the repository's revision range.
    uprev = min(max(0, count - 1), rev + revcount)
    downrev = max(0, rev - revcount)
    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)

    tree = []
    nextentry = []
    lastrev = 0
    # pos == -1 means the empty repository (nullrev): nothing to walk.
    if pos != -1:
        allrevs = web.repo.changelog.revs(pos, 0)
        revs = []
        # Take revcount revisions plus one extra; the extra one (if any)
        # tells us whether a "next page" link is needed.
        for i in allrevs:
            revs.append(i)
            if len(revs) >= revcount + 1:
                break

        if len(revs) > revcount:
            # The extra revision becomes the first entry of the next page.
            nextentry = [webutil.commonentry(web.repo, web.repo[revs[-1]])]
            revs = revs[:-1]

        lastrev = revs[-1]

        # We have to feed a baseset to dagwalker as it is expecting smartset
        # object. This does not have a big impact on hgweb performance itself
        # since hgweb graphing code is not itself lazy yet.
        dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
        # As we said one line above... not lazy.
        tree = list(item for item in graphmod.colored(dag, web.repo)
                    if item[1] == graphmod.CHANGESET)

    def fulltree():
        # Recompute the colored graph from 'graphtop' down to 'lastrev';
        # used only for the JavaScript data, which may cover more rows
        # than the HTML page itself.
        pos = web.repo[graphtop].rev()
        tree = []
        if pos != -1:
            revs = web.repo.changelog.revs(pos, lastrev)
            dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
            tree = list(item for item in graphmod.colored(dag, web.repo)
                        if item[1] == graphmod.CHANGESET)
        return tree

    def jsdata(context):
        # One mapping per changeset for the client-side graph renderer.
        for (id, type, ctx, vtx, edges) in fulltree():
            yield {'node': pycompat.bytestr(ctx),
                   'graphnode': webutil.getgraphnode(web.repo, ctx),
                   'vertex': vtx,
                   'edges': edges}

    def nodes(context):
        # One mapping per visible row, combining the common changeset
        # fields with this row's graph layout (column, color, edges).
        parity = paritygen(web.stripecount)
        for row, (id, type, ctx, vtx, edges) in enumerate(tree):
            entry = webutil.commonentry(web.repo, ctx)
            edgedata = [{'col': edge[0],
                         'nextcol': edge[1],
                         # Edge colors cycle through 6 palette slots (1-6).
                         'color': (edge[2] - 1) % 6 + 1,
                         'width': edge[3],
                         'bcolor': edge[4]}
                        for edge in edges]

            entry.update({'col': vtx[0],
                          'color': (vtx[1] - 1) % 6 + 1,
                          'parity': next(parity),
                          'edges': templateutil.mappinglist(edgedata),
                          'row': row,
                          'nextrow': row + 1})

            yield entry

    rows = len(tree)

    return web.sendtemplate(
        'graph',
        rev=rev,
        symrev=symrev,
        revcount=revcount,
        uprev=uprev,
        lessvars=lessvars,
        morevars=morevars,
        downrev=downrev,
        graphvars=graphvars,
        rows=rows,
        bg_height=bg_height,
        changesets=count,
        nextentry=templateutil.mappinglist(nextentry),
        jsdata=templateutil.mappinggenerator(jsdata),
        nodes=templateutil.mappinggenerator(nodes),
        node=ctx.hex(),
        archives=web.archivelist('tip'),
        changenav=changenav)
1381
1380
def _getdoc(e):
    """Return the translated one-line summary for a command-table entry.

    ``e`` is a command table value whose first element is the command
    function; its docstring's first line (localized) is the summary.
    Falls back to a placeholder when the function has no docstring.
    """
    docstring = e[0].__doc__
    if not docstring:
        return _('(no help text available)')
    return _(docstring).partition('\n')[0]
1389
1388
@webcommand('help')
def help(web):
    """
    /help[/{topic}]
    ---------------

    Render help documentation.

    This web command is roughly equivalent to :hg:`help`. If a ``topic``
    is defined, that help topic will be rendered. If not, an index of
    available help topics will be rendered.

    The ``help`` template will be rendered when requesting help for a topic.
    ``helptopics`` will be rendered for the index of help topics.
    """
    from .. import commands, help as helpmod # avoid cycle

    topicname = web.req.qsparams.get('node')
    if not topicname:
        # No topic requested: render the index of topics and commands.
        def topics(context):
            for h in helpmod.helptable:
                entries, summary, _doc = h[0:3]
                yield {'topic': entries[0], 'summary': summary}

        early, other = [], []
        # A table key like 'log|history' names the command plus aliases;
        # the primary name is the part before the first '|'.
        primary = lambda s: s.partition('|')[0]
        for c, e in commands.table.iteritems():
            doc = _getdoc(e)
            # Hide deprecated and debug commands from the index.
            if 'DEPRECATED' in doc or c.startswith('debug'):
                continue
            cmd = primary(c)
            # 'helpbasic' commands are listed first, like in `hg help`.
            if getattr(e[0], 'helpbasic', False):
                early.append((cmd, doc))
            else:
                other.append((cmd, doc))

        early.sort()
        other.sort()

        def earlycommands(context):
            for c, doc in early:
                yield {'topic': c, 'summary': doc}

        def othercommands(context):
            for c, doc in other:
                yield {'topic': c, 'summary': doc}

        return web.sendtemplate(
            'helptopics',
            topics=templateutil.mappinggenerator(topics),
            earlycommands=templateutil.mappinggenerator(earlycommands),
            othercommands=templateutil.mappinggenerator(othercommands),
            title='Index')

    # Render an index of sub-topics.
    if topicname in helpmod.subtopics:
        topics = []
        for entries, summary, _doc in helpmod.subtopics[topicname]:
            topics.append({
                'topic': '%s.%s' % (topicname, entries[0]),
                'basename': entries[0],
                'summary': summary,
            })

        return web.sendtemplate(
            'helptopics',
            topics=templateutil.mappinglist(topics),
            title=topicname,
            subindex=True)

    # A throwaway ui to capture the help text; verbose to include the
    # detailed help, matching `hg help -v`.
    u = webutil.wsgiui.load()
    u.verbose = True

    # Render a page from a sub-topic.
    if '.' in topicname:
        # TODO implement support for rendering sections, like
        # `hg help` works.
        topic, subtopic = topicname.split('.', 1)
        if topic not in helpmod.subtopics:
            raise ErrorResponse(HTTP_NOT_FOUND)
    else:
        topic = topicname
        subtopic = None

    try:
        doc = helpmod.help_(u, commands, topic, subtopic=subtopic)
    except error.Abort:
        # Unknown topic/command: help_() aborts; map that to a 404.
        raise ErrorResponse(HTTP_NOT_FOUND)

    return web.sendtemplate(
        'help',
        topic=topicname,
        doc=doc)
1483
1482
# tell hggettext to extract docstrings from these functions:
# NOTE(review): 'commands' is presumably this module's webcommand registry
# (name -> handler), populated by the @webcommand decorator — the handlers'
# docstrings double as user-visible help text, hence the i18n extraction.
i18nfunctions = commands.values()
General Comments 0
You need to be logged in to leave comments. Login now