@@ -1,423 +1,410 @@
 # narrowcommands.py - command modifications for narrowhg extension
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import

 import itertools
 import os

 from mercurial.i18n import _
 from mercurial import (
     cmdutil,
     commands,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     hg,
     merge,
     narrowspec,
     node,
     pycompat,
     registrar,
     repair,
     repository,
     repoview,
     sparse,
     util,
 )

 from . import (
     narrowwirepeer,
 )

 table = {}
 command = registrar.command(table)

 def setup():
     """Wraps user-facing mercurial commands with narrow-aware versions."""

     entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
     entry[1].append(('', 'narrow', None,
                      _("create a narrow clone of select files")))
     entry[1].append(('', 'depth', '',
                      _("limit the history fetched by distance from heads")))
     entry[1].append(('', 'narrowspec', '',
                      _("read narrowspecs from file")))
     # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
     if 'sparse' not in extensions.enabled():
         entry[1].append(('', 'include', [],
                          _("specifically fetch this file/directory")))
         entry[1].append(
             ('', 'exclude', [],
              _("do not fetch this file/directory, even if included")))

     entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
     entry[1].append(('', 'depth', '',
                      _("limit the history fetched by distance from heads")))

     extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)

 def clonenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
     opts = pycompat.byteskwargs(opts)
     wrappedextraprepare = util.nullcontextmanager()
     narrowspecfile = opts['narrowspec']

     if narrowspecfile:
         filepath = os.path.join(pycompat.getcwd(), narrowspecfile)
         ui.status(_("reading narrowspec from '%s'\n") % filepath)
         try:
             fdata = util.readfile(filepath)
         except IOError as inst:
             raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
                               (filepath, encoding.strtolocal(inst.strerror)))

         includes, excludes, profiles = sparse.parseconfig(ui, fdata, 'narrow')
         if profiles:
             raise error.Abort(_("cannot specify other files using '%include' in"
                                 " narrowspec"))

         narrowspec.validatepatterns(includes)
         narrowspec.validatepatterns(excludes)

         # narrowspec is passed so we should assume that user wants narrow clone
         opts['narrow'] = True
         opts['include'].extend(includes)
         opts['exclude'].extend(excludes)

     if opts['narrow']:
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
-            # Create narrow spec patterns from clone flags
-            includepats = narrowspec.parsepatterns(opts['include'])
-            excludepats = narrowspec.parsepatterns(opts['exclude'])
-
-            if not includepats and excludepats:
-                # If nothing was included, we assume the user meant to include
-                # everything, except what they asked to exclude.
-                includepats = {'path:.'}
-
-            pullop.repo.setnarrowpats(includepats, excludepats)
-
-            # This will populate 'includepats' etc with the values from the
-            # narrowspec we just saved.
             orig(pullop, kwargs)

             if opts.get('depth'):
                 kwargs['depth'] = opts['depth']
         wrappedextraprepare = extensions.wrappedfunction(exchange,
             '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

     with wrappedextraprepare:
         return orig(ui, repo, *args, **pycompat.strkwargs(opts))

 def pullnarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps pull command to allow modifying narrow spec."""
     wrappedextraprepare = util.nullcontextmanager()
     if repository.NARROW_REQUIREMENT in repo.requirements:

         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
             if opts.get(r'depth'):
                 kwargs['depth'] = opts[r'depth']
         wrappedextraprepare = extensions.wrappedfunction(exchange,
             '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

     with wrappedextraprepare:
         return orig(ui, repo, *args, **opts)

 def archivenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps archive command to narrow the default includes."""
     if repository.NARROW_REQUIREMENT in repo.requirements:
         repo_includes, repo_excludes = repo.narrowpats
         includes = set(opts.get(r'include', []))
         excludes = set(opts.get(r'exclude', []))
         includes, excludes, unused_invalid = narrowspec.restrictpatterns(
             includes, excludes, repo_includes, repo_excludes)
         if includes:
             opts[r'include'] = includes
         if excludes:
             opts[r'exclude'] = excludes
     return orig(ui, repo, *args, **opts)

 def pullbundle2extraprepare(orig, pullop, kwargs):
     repo = pullop.repo
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return orig(pullop, kwargs)

     if narrowwirepeer.NARROWCAP not in pullop.remote.capabilities():
         raise error.Abort(_("server doesn't support narrow clones"))
     orig(pullop, kwargs)
     kwargs['narrow'] = True
     include, exclude = repo.narrowpats
     kwargs['oldincludepats'] = include
     kwargs['oldexcludepats'] = exclude
     kwargs['includepats'] = include
     kwargs['excludepats'] = exclude
     # calculate known nodes only in ellipses cases because in non-ellipses cases
     # we have all the nodes
     if narrowwirepeer.ELLIPSESCAP in pullop.remote.capabilities():
         kwargs['known'] = [node.hex(ctx.node()) for ctx in
                            repo.set('::%ln', pullop.common)
                            if ctx.node() != node.nullid]
         if not kwargs['known']:
             # Mercurial serializes an empty list as '' and deserializes it as
             # [''], so delete it instead to avoid handling the empty string on
             # the server.
             del kwargs['known']

 extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
                         pullbundle2extraprepare)

 def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
             newincludes, newexcludes, force):
     oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
     newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

     # This is essentially doing "hg outgoing" to find all local-only
     # commits. We will then check that the local-only commits don't
     # have any changes to files that will be untracked.
     unfi = repo.unfiltered()
     outgoing = discovery.findcommonoutgoing(unfi, remote,
                                             commoninc=commoninc)
     ui.status(_('looking for local changes to affected paths\n'))
     localnodes = []
     for n in itertools.chain(outgoing.missing, outgoing.excluded):
         if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
             localnodes.append(n)
     revstostrip = unfi.revs('descendants(%ln)', localnodes)
     hiddenrevs = repoview.filterrevs(repo, 'visible')
     visibletostrip = list(repo.changelog.node(r)
                           for r in (revstostrip - hiddenrevs))
     if visibletostrip:
         ui.status(_('The following changeset(s) or their ancestors have '
                     'local changes not on the remote:\n'))
         maxnodes = 10
         if ui.verbose or len(visibletostrip) <= maxnodes:
             for n in visibletostrip:
                 ui.status('%s\n' % node.short(n))
         else:
             for n in visibletostrip[:maxnodes]:
                 ui.status('%s\n' % node.short(n))
             ui.status(_('...and %d more, use --verbose to list all\n') %
                       (len(visibletostrip) - maxnodes))
         if not force:
             raise error.Abort(_('local changes found'),
                               hint=_('use --force-delete-local-changes to '
                                      'ignore'))

     with ui.uninterruptable():
         if revstostrip:
             tostrip = [unfi.changelog.node(r) for r in revstostrip]
             if repo['.'].node() in tostrip:
                 # stripping working copy, so move to a different commit first
                 urev = max(repo.revs('(::%n) - %ln + null',
                                      repo['.'].node(), visibletostrip))
                 hg.clean(repo, urev)
             repair.strip(ui, unfi, tostrip, topic='narrow')

         todelete = []
         for f, f2, size in repo.store.datafiles():
             if f.startswith('data/'):
                 file = f[5:-2]
                 if not newmatch(file):
                     todelete.append(f)
             elif f.startswith('meta/'):
                 dir = f[5:-13]
                 dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
                 include = True
                 for d in dirs:
                     visit = newmatch.visitdir(d)
                     if not visit:
                         include = False
                         break
                     if visit == 'all':
                         break
                 if not include:
                     todelete.append(f)

         repo.destroying()

         with repo.transaction("narrowing"):
             for f in todelete:
                 ui.status(_('deleting %s\n') % f)
                 util.unlinkpath(repo.svfs.join(f))
                 repo.store.markremoved(f)

             for f in repo.dirstate:
                 if not newmatch(f):
                     repo.dirstate.drop(f)
                     repo.wvfs.unlinkpath(f)
             repo.setnarrowpats(newincludes, newexcludes)

         repo.destroyed()

 def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
     newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

     def pullbundle2extraprepare_widen(orig, pullop, kwargs):
         orig(pullop, kwargs)
         # The old{in,ex}cludepats have already been set by orig()
         kwargs['includepats'] = newincludes
         kwargs['excludepats'] = newexcludes
         kwargs['widen'] = True
     wrappedextraprepare = extensions.wrappedfunction(exchange,
         '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

     # define a function that narrowbundle2 can call after creating the
     # backup bundle, but before applying the bundle from the server
     def setnewnarrowpats():
         repo.setnarrowpats(newincludes, newexcludes)
     repo.setnewnarrowpats = setnewnarrowpats

     with ui.uninterruptable():
         ds = repo.dirstate
         p1, p2 = ds.p1(), ds.p2()
         with ds.parentchange():
             ds.setparents(node.nullid, node.nullid)
         common = commoninc[0]
         with wrappedextraprepare:
             exchange.pull(repo, remote, heads=common)
         with ds.parentchange():
             ds.setparents(p1, p2)

         repo.setnewnarrowpats()
         actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
         addgaction = actions['g'].append

         mf = repo['.'].manifest().matches(newmatch)
         for f, fn in mf.iteritems():
             if f not in repo.dirstate:
                 addgaction((f, (mf.flags(f), False),
                             "add from widened narrow clone"))

         merge.applyupdates(repo, actions, wctx=repo[None],
                            mctx=repo['.'], overwrite=False)
         merge.recordupdates(repo, actions, branchmerge=False)

 # TODO(rdamazio): Make new matcher format and update description
 @command('tracked',
     [('', 'addinclude', [], _('new paths to include')),
      ('', 'removeinclude', [], _('old paths to no longer include')),
      ('', 'addexclude', [], _('new paths to exclude')),
      ('', 'import-rules', '', _('import narrowspecs from a file')),
      ('', 'removeexclude', [], _('old paths to no longer exclude')),
      ('', 'clear', False, _('whether to replace the existing narrowspec')),
      ('', 'force-delete-local-changes', False,
       _('forces deletion of local changes when narrowing')),
     ] + commands.remoteopts,
     _('[OPTIONS]... [REMOTE]'),
     inferrepo=True)
 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
     """show or change the current narrowspec

     With no argument, shows the current narrowspec entries, one per line. Each
     line will be prefixed with 'I' or 'X' for included or excluded patterns,
     respectively.

     The narrowspec is comprised of expressions to match remote files and/or
     directories that should be pulled into your client.
     The narrowspec has *include* and *exclude* expressions, with excludes always
     trumping includes: that is, if a file matches an exclude expression, it will
     be excluded even if it also matches an include expression.
     Excluding files that were never included has no effect.

     Each included or excluded entry is in the format described by
     'hg help patterns'.

     The options allow you to add or remove included and excluded expressions.

     If --clear is specified, then all previous includes and excludes are DROPPED
     and replaced by the new ones specified to --addinclude and --addexclude.
     If --clear is specified without any further options, the narrowspec will be
     empty and will not match any files.
     """
     opts = pycompat.byteskwargs(opts)
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         ui.warn(_('The narrow command is only supported on repositories cloned'
                   ' with --narrow.\n'))
         return 1

     # Before supporting, decide whether "hg tracked --clear" should mean
     # tracking no paths or all paths.
     if opts['clear']:
         ui.warn(_('The --clear option is not yet supported.\n'))
         return 1

     # import rules from a file
     newrules = opts.get('import_rules')
     if newrules:
         try:
             filepath = os.path.join(pycompat.getcwd(), newrules)
             fdata = util.readfile(filepath)
         except IOError as inst:
             raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
                               (filepath, encoding.strtolocal(inst.strerror)))
         includepats, excludepats, profiles = sparse.parseconfig(ui, fdata,
                                                                 'narrow')
         if profiles:
             raise error.Abort(_("including other spec files using '%include' "
                                 "is not supported in narrowspec"))
         opts['addinclude'].extend(includepats)
         opts['addexclude'].extend(excludepats)

     addedincludes = narrowspec.parsepatterns(opts['addinclude'])
     removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
     addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
     removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
     widening = addedincludes or removedexcludes
     narrowing = removedincludes or addedexcludes
     only_show = not widening and not narrowing

     # Only print the current narrowspec.
     if only_show:
         include, exclude = repo.narrowpats

         ui.pager('tracked')
         fm = ui.formatter('narrow', opts)
         for i in sorted(include):
             fm.startitem()
             fm.write('status', '%s ', 'I', label='narrow.included')
             fm.write('pat', '%s\n', i, label='narrow.included')
         for i in sorted(exclude):
             fm.startitem()
             fm.write('status', '%s ', 'X', label='narrow.excluded')
             fm.write('pat', '%s\n', i, label='narrow.excluded')
         fm.end()
         return 0

     with repo.wlock(), repo.lock():
         cmdutil.bailifchanged(repo)

         # Find the revisions we have in common with the remote. These will
         # be used for finding local-only changes for narrowing. They will
         # also define the set of revisions to update for widening.
         remotepath = ui.expandpath(remotepath or 'default')
         url, branches = hg.parseurl(remotepath)
         ui.status(_('comparing with %s\n') % util.hidepassword(url))
         remote = hg.peer(repo, opts, url)
         commoninc = discovery.findcommonincoming(repo, remote)

         oldincludes, oldexcludes = repo.narrowpats
         if narrowing:
             newincludes = oldincludes - removedincludes
             newexcludes = oldexcludes | addedexcludes
             _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
                     newincludes, newexcludes,
                     opts['force_delete_local_changes'])
             # _narrow() updated the narrowspec and _widen() below needs to
             # use the updated values as its base (otherwise removed includes
             # and addedexcludes will be lost in the resulting narrowspec)
             oldincludes = newincludes
             oldexcludes = newexcludes

         if widening:
             newincludes = oldincludes | addedincludes
             newexcludes = oldexcludes - removedexcludes
             _widen(ui, repo, remote, commoninc, newincludes, newexcludes)

         return 0
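
Note on the narrowspec file format: both clonenarrowcmd and trackedcmd above parse the user-supplied file with sparse.parseconfig(ui, fdata, 'narrow') and abort if it contains '%include' profile references, so a narrowspec is a sparse-style config listing patterns in 'hg help patterns' form. A minimal sketch, assuming the usual sparse config sections (the paths and server URL here are hypothetical):

    [include]
    path:src/backend
    path:docs
    [exclude]
    path:src/backend/tests

    $ hg clone --narrowspec spec.txt ssh://hg.example.org/bigrepo dest

Per the clone wrapper above, passing --narrowspec implies --narrow, and the parsed patterns are validated with narrowspec.validatepatterns before being merged into the --include/--exclude options.
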
@@ -1,1227 +1,1231 b'' | |||||
1 | # hg.py - repository classes for mercurial |
|
1 | # hg.py - repository classes for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | from __future__ import absolute_import |
|
9 | from __future__ import absolute_import | |
10 |
|
10 | |||
11 | import errno |
|
11 | import errno | |
12 | import hashlib |
|
12 | import hashlib | |
13 | import os |
|
13 | import os | |
14 | import shutil |
|
14 | import shutil | |
15 | import stat |
|
15 | import stat | |
16 |
|
16 | |||
17 | from .i18n import _ |
|
17 | from .i18n import _ | |
18 | from .node import ( |
|
18 | from .node import ( | |
19 | nullid, |
|
19 | nullid, | |
20 | ) |
|
20 | ) | |
21 |
|
21 | |||
22 | from . import ( |
|
22 | from . import ( | |
23 | bookmarks, |
|
23 | bookmarks, | |
24 | bundlerepo, |
|
24 | bundlerepo, | |
25 | cacheutil, |
|
25 | cacheutil, | |
26 | cmdutil, |
|
26 | cmdutil, | |
27 | destutil, |
|
27 | destutil, | |
28 | discovery, |
|
28 | discovery, | |
29 | error, |
|
29 | error, | |
30 | exchange, |
|
30 | exchange, | |
31 | extensions, |
|
31 | extensions, | |
32 | httppeer, |
|
32 | httppeer, | |
33 | localrepo, |
|
33 | localrepo, | |
34 | lock, |
|
34 | lock, | |
35 | logcmdutil, |
|
35 | logcmdutil, | |
36 | logexchange, |
|
36 | logexchange, | |
37 | merge as mergemod, |
|
37 | merge as mergemod, | |
38 | narrowspec, |
|
38 | narrowspec, | |
39 | node, |
|
39 | node, | |
40 | phases, |
|
40 | phases, | |
41 | scmutil, |
|
41 | scmutil, | |
42 | sshpeer, |
|
42 | sshpeer, | |
43 | statichttprepo, |
|
43 | statichttprepo, | |
44 | ui as uimod, |
|
44 | ui as uimod, | |
45 | unionrepo, |
|
45 | unionrepo, | |
46 | url, |
|
46 | url, | |
47 | util, |
|
47 | util, | |
48 | verify as verifymod, |
|
48 | verify as verifymod, | |
49 | vfs as vfsmod, |
|
49 | vfs as vfsmod, | |
50 | ) |
|
50 | ) | |
51 |
|
51 | |||
52 | from .utils import ( |
|
52 | from .utils import ( | |
53 | stringutil, |
|
53 | stringutil, | |
54 | ) |
|
54 | ) | |
55 |
|
55 | |||
56 | release = lock.release |
|
56 | release = lock.release | |
57 |
|
57 | |||
58 | # shared features |
|
58 | # shared features | |
59 | sharedbookmarks = 'bookmarks' |
|
59 | sharedbookmarks = 'bookmarks' | |
60 |
|
60 | |||
61 | def _local(path): |
|
61 | def _local(path): | |
62 | path = util.expandpath(util.urllocalpath(path)) |
|
62 | path = util.expandpath(util.urllocalpath(path)) | |
63 | return (os.path.isfile(path) and bundlerepo or localrepo) |
|
63 | return (os.path.isfile(path) and bundlerepo or localrepo) | |
64 |
|
64 | |||
65 | def addbranchrevs(lrepo, other, branches, revs): |
|
65 | def addbranchrevs(lrepo, other, branches, revs): | |
66 | peer = other.peer() # a courtesy to callers using a localrepo for other |
|
66 | peer = other.peer() # a courtesy to callers using a localrepo for other | |
67 | hashbranch, branches = branches |
|
67 | hashbranch, branches = branches | |
68 | if not hashbranch and not branches: |
|
68 | if not hashbranch and not branches: | |
69 | x = revs or None |
|
69 | x = revs or None | |
70 | if revs: |
|
70 | if revs: | |
71 | y = revs[0] |
|
71 | y = revs[0] | |
72 | else: |
|
72 | else: | |
73 | y = None |
|
73 | y = None | |
74 | return x, y |
|
74 | return x, y | |
75 | if revs: |
|
75 | if revs: | |
76 | revs = list(revs) |
|
76 | revs = list(revs) | |
77 | else: |
|
77 | else: | |
78 | revs = [] |
|
78 | revs = [] | |
79 |
|
79 | |||
80 | if not peer.capable('branchmap'): |
|
80 | if not peer.capable('branchmap'): | |
81 | if branches: |
|
81 | if branches: | |
82 | raise error.Abort(_("remote branch lookup not supported")) |
|
82 | raise error.Abort(_("remote branch lookup not supported")) | |
83 | revs.append(hashbranch) |
|
83 | revs.append(hashbranch) | |
84 | return revs, revs[0] |
|
84 | return revs, revs[0] | |
85 |
|
85 | |||
86 | with peer.commandexecutor() as e: |
|
86 | with peer.commandexecutor() as e: | |
87 | branchmap = e.callcommand('branchmap', {}).result() |
|
87 | branchmap = e.callcommand('branchmap', {}).result() | |
88 |
|
88 | |||
89 | def primary(branch): |
|
89 | def primary(branch): | |
90 | if branch == '.': |
|
90 | if branch == '.': | |
91 | if not lrepo: |
|
91 | if not lrepo: | |
92 | raise error.Abort(_("dirstate branch not accessible")) |
|
92 | raise error.Abort(_("dirstate branch not accessible")) | |
93 | branch = lrepo.dirstate.branch() |
|
93 | branch = lrepo.dirstate.branch() | |
94 | if branch in branchmap: |
|
94 | if branch in branchmap: | |
95 | revs.extend(node.hex(r) for r in reversed(branchmap[branch])) |
|
95 | revs.extend(node.hex(r) for r in reversed(branchmap[branch])) | |
96 | return True |
|
96 | return True | |
97 | else: |
|
97 | else: | |
98 | return False |
|
98 | return False | |
99 |
|
99 | |||
100 | for branch in branches: |
|
100 | for branch in branches: | |
101 | if not primary(branch): |
|
101 | if not primary(branch): | |
102 | raise error.RepoLookupError(_("unknown branch '%s'") % branch) |
|
102 | raise error.RepoLookupError(_("unknown branch '%s'") % branch) | |
103 | if hashbranch: |
|
103 | if hashbranch: | |
104 | if not primary(hashbranch): |
|
104 | if not primary(hashbranch): | |
105 | revs.append(hashbranch) |
|
105 | revs.append(hashbranch) | |
106 | return revs, revs[0] |
|
106 | return revs, revs[0] | |
107 |
|
107 | |||
108 | def parseurl(path, branches=None): |
|
108 | def parseurl(path, branches=None): | |
109 | '''parse url#branch, returning (url, (branch, branches))''' |
|
109 | '''parse url#branch, returning (url, (branch, branches))''' | |
110 |
|
110 | |||
111 | u = util.url(path) |
|
111 | u = util.url(path) | |
112 | branch = None |
|
112 | branch = None | |
113 | if u.fragment: |
|
113 | if u.fragment: | |
114 | branch = u.fragment |
|
114 | branch = u.fragment | |
115 | u.fragment = None |
|
115 | u.fragment = None | |
116 | return bytes(u), (branch, branches or []) |
|
116 | return bytes(u), (branch, branches or []) | |
117 |
|
117 | |||
118 | schemes = { |
|
118 | schemes = { | |
119 | 'bundle': bundlerepo, |
|
119 | 'bundle': bundlerepo, | |
120 | 'union': unionrepo, |
|
120 | 'union': unionrepo, | |
121 | 'file': _local, |
|
121 | 'file': _local, | |
122 | 'http': httppeer, |
|
122 | 'http': httppeer, | |
123 | 'https': httppeer, |
|
123 | 'https': httppeer, | |
124 | 'ssh': sshpeer, |
|
124 | 'ssh': sshpeer, | |
125 | 'static-http': statichttprepo, |
|
125 | 'static-http': statichttprepo, | |
126 | } |
|
126 | } | |
127 |
|
127 | |||
128 | def _peerlookup(path): |
|
128 | def _peerlookup(path): | |
129 | u = util.url(path) |
|
129 | u = util.url(path) | |
130 | scheme = u.scheme or 'file' |
|
130 | scheme = u.scheme or 'file' | |
131 | thing = schemes.get(scheme) or schemes['file'] |
|
131 | thing = schemes.get(scheme) or schemes['file'] | |
132 | try: |
|
132 | try: | |
133 | return thing(path) |
|
133 | return thing(path) | |
134 | except TypeError: |
|
134 | except TypeError: | |
135 | # we can't test callable(thing) because 'thing' can be an unloaded |
|
135 | # we can't test callable(thing) because 'thing' can be an unloaded | |
136 | # module that implements __call__ |
|
136 | # module that implements __call__ | |
137 | if not util.safehasattr(thing, 'instance'): |
|
137 | if not util.safehasattr(thing, 'instance'): | |
138 | raise |
|
138 | raise | |
139 | return thing |
|
139 | return thing | |
140 |
|
140 | |||
141 | def islocal(repo): |
|
141 | def islocal(repo): | |
142 | '''return true if repo (or path pointing to repo) is local''' |
|
142 | '''return true if repo (or path pointing to repo) is local''' | |
143 | if isinstance(repo, bytes): |
|
143 | if isinstance(repo, bytes): | |
144 | try: |
|
144 | try: | |
145 | return _peerlookup(repo).islocal(repo) |
|
145 | return _peerlookup(repo).islocal(repo) | |
146 | except AttributeError: |
|
146 | except AttributeError: | |
147 | return False |
|
147 | return False | |
148 | return repo.local() |
|
148 | return repo.local() | |
149 |
|
149 | |||
150 | def openpath(ui, path): |
|
150 | def openpath(ui, path): | |
151 | '''open path with open if local, url.open if remote''' |
|
151 | '''open path with open if local, url.open if remote''' | |
152 | pathurl = util.url(path, parsequery=False, parsefragment=False) |
|
152 | pathurl = util.url(path, parsequery=False, parsefragment=False) | |
153 | if pathurl.islocal(): |
|
153 | if pathurl.islocal(): | |
154 | return util.posixfile(pathurl.localpath(), 'rb') |
|
154 | return util.posixfile(pathurl.localpath(), 'rb') | |
155 | else: |
|
155 | else: | |
156 | return url.open(ui, path) |
|
156 | return url.open(ui, path) | |
157 |
|
157 | |||
158 | # a list of (ui, repo) functions called for wire peer initialization |
|
158 | # a list of (ui, repo) functions called for wire peer initialization | |
159 | wirepeersetupfuncs = [] |
|
159 | wirepeersetupfuncs = [] | |
160 |
|
160 | |||
161 | def _peerorrepo(ui, path, create=False, presetupfuncs=None, |
|
161 | def _peerorrepo(ui, path, create=False, presetupfuncs=None, | |
162 | intents=None, createopts=None): |
|
162 | intents=None, createopts=None): | |
163 | """return a repository object for the specified path""" |
|
163 | """return a repository object for the specified path""" | |
164 | obj = _peerlookup(path).instance(ui, path, create, intents=intents, |
|
164 | obj = _peerlookup(path).instance(ui, path, create, intents=intents, | |
165 | createopts=createopts) |
|
165 | createopts=createopts) | |
166 | ui = getattr(obj, "ui", ui) |
|
166 | ui = getattr(obj, "ui", ui) | |
167 | if ui.configbool('devel', 'debug.extensions'): |
|
167 | if ui.configbool('devel', 'debug.extensions'): | |
168 | log = lambda msg, *values: ui.debug('debug.extensions: ', |
|
168 | log = lambda msg, *values: ui.debug('debug.extensions: ', | |
169 | msg % values, label='debug.extensions') |
|
169 | msg % values, label='debug.extensions') | |
170 | else: |
|
170 | else: | |
171 | log = lambda *a, **kw: None |
|
171 | log = lambda *a, **kw: None | |
172 | for f in presetupfuncs or []: |
|
172 | for f in presetupfuncs or []: | |
173 | f(ui, obj) |
|
173 | f(ui, obj) | |
174 | log('- executing reposetup hooks\n') |
|
174 | log('- executing reposetup hooks\n') | |
175 | with util.timedcm('all reposetup') as allreposetupstats: |
|
175 | with util.timedcm('all reposetup') as allreposetupstats: | |
176 | for name, module in extensions.extensions(ui): |
|
176 | for name, module in extensions.extensions(ui): | |
177 | log(' - running reposetup for %s\n' % (name,)) |
|
177 | log(' - running reposetup for %s\n' % (name,)) | |
178 | hook = getattr(module, 'reposetup', None) |
|
178 | hook = getattr(module, 'reposetup', None) | |
179 | if hook: |
|
179 | if hook: | |
180 | with util.timedcm('reposetup %r', name) as stats: |
|
180 | with util.timedcm('reposetup %r', name) as stats: | |
181 | hook(ui, obj) |
|
181 | hook(ui, obj) | |
182 | log(' > reposetup for %r took %s\n', name, stats) |
|
182 | log(' > reposetup for %r took %s\n', name, stats) | |
183 | log('> all reposetup took %s\n', allreposetupstats) |
|
183 | log('> all reposetup took %s\n', allreposetupstats) | |
184 | if not obj.local(): |
|
184 | if not obj.local(): | |
185 | for f in wirepeersetupfuncs: |
|
185 | for f in wirepeersetupfuncs: | |
186 | f(ui, obj) |
|
186 | f(ui, obj) | |
187 | return obj |
|
187 | return obj | |
188 |
|
188 | |||
189 | def repository(ui, path='', create=False, presetupfuncs=None, intents=None, |
|
189 | def repository(ui, path='', create=False, presetupfuncs=None, intents=None, | |
190 | createopts=None): |
|
190 | createopts=None): | |
191 | """return a repository object for the specified path""" |
|
191 | """return a repository object for the specified path""" | |
192 | peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs, |
|
192 | peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs, | |
193 | intents=intents, createopts=createopts) |
|
193 | intents=intents, createopts=createopts) | |
194 | repo = peer.local() |
|
194 | repo = peer.local() | |
195 | if not repo: |
|
195 | if not repo: | |
196 | raise error.Abort(_("repository '%s' is not local") % |
|
196 | raise error.Abort(_("repository '%s' is not local") % | |
197 | (path or peer.url())) |
|
197 | (path or peer.url())) | |
198 | return repo.filtered('visible') |
|
198 | return repo.filtered('visible') | |
199 |
|
199 | |||
200 | def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None): |
|
200 | def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None): | |
201 | '''return a repository peer for the specified path''' |
|
201 | '''return a repository peer for the specified path''' | |
202 | rui = remoteui(uiorrepo, opts) |
|
202 | rui = remoteui(uiorrepo, opts) | |
203 | return _peerorrepo(rui, path, create, intents=intents, |
|
203 | return _peerorrepo(rui, path, create, intents=intents, | |
204 | createopts=createopts).peer() |
|
204 | createopts=createopts).peer() | |
205 |
|
205 | |||
206 | def defaultdest(source): |
|
206 | def defaultdest(source): | |
207 | '''return default destination of clone if none is given |
|
207 | '''return default destination of clone if none is given | |
208 |
|
208 | |||
209 | >>> defaultdest(b'foo') |
|
209 | >>> defaultdest(b'foo') | |
210 | 'foo' |
|
210 | 'foo' | |
211 | >>> defaultdest(b'/foo/bar') |
|
211 | >>> defaultdest(b'/foo/bar') | |
212 | 'bar' |
|
212 | 'bar' | |
213 | >>> defaultdest(b'/') |
|
213 | >>> defaultdest(b'/') | |
214 | '' |
|
214 | '' | |
215 | >>> defaultdest(b'') |
|
215 | >>> defaultdest(b'') | |
216 | '' |
|
216 | '' | |
217 | >>> defaultdest(b'http://example.org/') |
|
217 | >>> defaultdest(b'http://example.org/') | |
218 | '' |
|
218 | '' | |
219 | >>> defaultdest(b'http://example.org/foo/') |
|
219 | >>> defaultdest(b'http://example.org/foo/') | |
220 | 'foo' |
|
220 | 'foo' | |
221 | ''' |
|
221 | ''' | |
222 | path = util.url(source).path |
|
222 | path = util.url(source).path | |
223 | if not path: |
|
223 | if not path: | |
224 | return '' |
|
224 | return '' | |
225 | return os.path.basename(os.path.normpath(path)) |
|
225 | return os.path.basename(os.path.normpath(path)) | |
226 |
|
226 | |||
227 | def sharedreposource(repo): |
|
227 | def sharedreposource(repo): | |
228 | """Returns repository object for source repository of a shared repo. |
|
228 | """Returns repository object for source repository of a shared repo. | |
229 |
|
229 | |||
230 | If repo is not a shared repository, returns None. |
|
230 | If repo is not a shared repository, returns None. | |
231 | """ |
|
231 | """ | |
232 | if repo.sharedpath == repo.path: |
|
232 | if repo.sharedpath == repo.path: | |
233 | return None |
|
233 | return None | |
234 |
|
234 | |||
235 | if util.safehasattr(repo, 'srcrepo') and repo.srcrepo: |
|
235 | if util.safehasattr(repo, 'srcrepo') and repo.srcrepo: | |
236 | return repo.srcrepo |
|
236 | return repo.srcrepo | |
237 |
|
237 | |||
238 | # the sharedpath always ends in the .hg; we want the path to the repo |
|
238 | # the sharedpath always ends in the .hg; we want the path to the repo | |
239 | source = repo.vfs.split(repo.sharedpath)[0] |
|
239 | source = repo.vfs.split(repo.sharedpath)[0] | |
240 | srcurl, branches = parseurl(source) |
|
240 | srcurl, branches = parseurl(source) | |
241 | srcrepo = repository(repo.ui, srcurl) |
|
241 | srcrepo = repository(repo.ui, srcurl) | |
242 | repo.srcrepo = srcrepo |
|
242 | repo.srcrepo = srcrepo | |
243 | return srcrepo |
|
243 | return srcrepo | |
244 |
|
244 | |||
245 | def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None, |
|
245 | def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None, | |
246 | relative=False): |
|
246 | relative=False): | |
247 | '''create a shared repository''' |
|
247 | '''create a shared repository''' | |
248 |
|
248 | |||
249 | if not islocal(source): |
|
249 | if not islocal(source): | |
250 | raise error.Abort(_('can only share local repositories')) |
|
250 | raise error.Abort(_('can only share local repositories')) | |
251 |
|
251 | |||
252 | if not dest: |
|
252 | if not dest: | |
253 | dest = defaultdest(source) |
|
253 | dest = defaultdest(source) | |
254 | else: |
|
254 | else: | |
255 | dest = ui.expandpath(dest) |
|
255 | dest = ui.expandpath(dest) | |
256 |
|
256 | |||
257 | if isinstance(source, bytes): |
|
257 | if isinstance(source, bytes): | |
258 | origsource = ui.expandpath(source) |
|
258 | origsource = ui.expandpath(source) | |
259 | source, branches = parseurl(origsource) |
|
259 | source, branches = parseurl(origsource) | |
260 | srcrepo = repository(ui, source) |
|
260 | srcrepo = repository(ui, source) | |
261 | rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None) |
|
261 | rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None) | |
262 | else: |
|
262 | else: | |
263 | srcrepo = source.local() |
|
263 | srcrepo = source.local() | |
264 | origsource = source = srcrepo.url() |
|
264 | origsource = source = srcrepo.url() | |
265 | checkout = None |
|
265 | checkout = None | |
266 |
|
266 | |||
267 | sharedpath = srcrepo.sharedpath # if our source is already sharing |
|
267 | sharedpath = srcrepo.sharedpath # if our source is already sharing | |
268 |
|
268 | |||
269 | destwvfs = vfsmod.vfs(dest, realpath=True) |
|
269 | destwvfs = vfsmod.vfs(dest, realpath=True) | |
270 | destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True) |
|
270 | destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True) | |
271 |
|
271 | |||
272 | if destvfs.lexists(): |
|
272 | if destvfs.lexists(): | |
273 | raise error.Abort(_('destination already exists')) |
|
273 | raise error.Abort(_('destination already exists')) | |
274 |
|
274 | |||
275 | if not destwvfs.isdir(): |
|
275 | if not destwvfs.isdir(): | |
276 | destwvfs.makedirs() |
|
276 | destwvfs.makedirs() | |
277 | destvfs.makedir() |
|
277 | destvfs.makedir() | |
278 |
|
278 | |||
279 | requirements = '' |
|
279 | requirements = '' | |
280 | try: |
|
280 | try: | |
281 | requirements = srcrepo.vfs.read('requires') |
|
281 | requirements = srcrepo.vfs.read('requires') | |
282 | except IOError as inst: |
|
282 | except IOError as inst: | |
283 | if inst.errno != errno.ENOENT: |
|
283 | if inst.errno != errno.ENOENT: | |
284 | raise |
|
284 | raise | |
285 |
|
285 | |||
286 | if relative: |
|
286 | if relative: | |
287 | try: |
|
287 | try: | |
288 | sharedpath = os.path.relpath(sharedpath, destvfs.base) |
|
288 | sharedpath = os.path.relpath(sharedpath, destvfs.base) | |
289 | requirements += 'relshared\n' |
|
289 | requirements += 'relshared\n' | |
290 | except (IOError, ValueError) as e: |
|
290 | except (IOError, ValueError) as e: | |
291 | # ValueError is raised on Windows if the drive letters differ on |
|
291 | # ValueError is raised on Windows if the drive letters differ on | |
292 | # each path |
|
292 | # each path | |
293 | raise error.Abort(_('cannot calculate relative path'), |
|
293 | raise error.Abort(_('cannot calculate relative path'), | |
294 | hint=stringutil.forcebytestr(e)) |
|
294 | hint=stringutil.forcebytestr(e)) | |
295 | else: |
|
295 | else: | |
296 | requirements += 'shared\n' |
|
296 | requirements += 'shared\n' | |
297 |
|
297 | |||
298 | destvfs.write('requires', requirements) |
|
298 | destvfs.write('requires', requirements) | |
299 | destvfs.write('sharedpath', sharedpath) |
|
299 | destvfs.write('sharedpath', sharedpath) | |
300 |
|
300 | |||
301 | r = repository(ui, destwvfs.base) |
|
301 | r = repository(ui, destwvfs.base) | |
302 | postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath) |
|
302 | postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath) | |
303 | _postshareupdate(r, update, checkout=checkout) |
|
303 | _postshareupdate(r, update, checkout=checkout) | |
304 | return r |
|
304 | return r | |
305 |
|
305 | |||
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = repo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

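# Usage sketch (path hypothetical): unshare() converts a share back into a
# standalone repository in place:
#
#   repo = repository(ui, '/repos/main-share')
#   unshare(repo.ui, repo)
#
# The store is copied in via copystore() and '.hg/sharedpath' is kept
# around as '.hg/sharedpath.old'.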
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

    with destrepo.wlock():
        if bookmarks:
            destrepo.vfs.write('shared', sharedbookmarks + '\n')

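# For illustration, a freshly shared repo typically ends up with
# (contents hypothetical):
#
#   .hg/hgrc    ->  [paths]
#                   default = /repos/main
#   .hg/shared  ->  bookmarks
#
# assuming sharedbookmarks (defined elsewhere in this module) is the
# literal 'bookmarks' marker.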
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

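# The fallback chain above means an explicit checkout wins, then the
# 'default' branch head, then 'tip'; e.g. _postshareupdate(repo, True) on a
# repo without a 'default' branch still updates, to 'tip'.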
def copystore(ui, srcrepo, destpath):
    '''copy files from the store of srcrepo into destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

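# Note the hardlink tri-state: it starts as None ("try to hardlink") and
# util.copyfiles() returns the mode it actually used, so the first file
# that cannot be linked flips the remainder to plain copies.
# Hypothetical call:
#
#   destlock = copystore(ui, srcrepo, '/repos/clone/.hg')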
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

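# With pooled sharing the resulting layout is roughly (paths hypothetical):
#
#   <pool>/<name>/.hg         # shared store, cloned once under the lock
#   <dest>/.hg/sharedpath     # points into the pool entry
#
# where <name> is chosen by the caller, clone() below, from either the root
# changeset node or a hash of the remote URL.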
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to dstcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

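# Hypothetical example: _copycache(srcrepo, dstcachedir, 'branch2-served')
# copies .hg/cache/branch2-served if the source has one, sparing the clone
# a branch cache recomputation; the actual names come from
# cacheutil.cachetocopy() in clone() below.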
def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None,
          storeincludepats=None, storeexcludepats=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

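    # Usage sketch (URL and patterns hypothetical): a narrow clone of two
    # directories would look like
    #
    #   srcpeer, destpeer = clone(ui, {}, 'https://example.com/repo',
    #                             dest='repo-narrow',
    #                             storeincludepats={'path:src', 'path:docs'})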
    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {'path:.'}

        createopts['narrowfiles'] = True

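    # E.g. passing only storeexcludepats={'path:tests'} (hypothetical) is
    # normalized here to storeincludepats={'path:.'} plus that exclusion,
    # so everything except tests/ gets cloned.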
    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand('lookup', {
                        'key': '0',
                    }).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_('(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
                                createopts=createopts)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)

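                # Recording the narrow spec on the fresh repo before the
                # pull below keeps it consistent with the includepats and
                # excludepats handed to exchange.pull().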
                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream,
                                  includepats=storeincludepats,
                                  excludepats=storeexcludepats)
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(_('narrow clone not available for '
                                        'remote destinations'))

                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(
                    _("clone from remote to remote not supported"))

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are merged.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean

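# Illustrative contrast: update(repo, node) preserves uncommitted changes
# (subject to the updatecheck policy), while clean(repo, node) discards
# them and also removes a leftover 'graftstate' file.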
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
      changes into destination
    * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

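# The updatecheck policy usually comes from configuration, e.g.:
#
#   [commands]
#   update.check = noconflict
#
# A config value outside the four recognized ones silently falls back to
# 'linear' above.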
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

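# Usage note: merge() also backs 'hg merge --abort'; with abort=True the
# passed node is ignored and the working directory is forced back to the
# pre-merge parent recorded in the merge state (or the current working
# directory parent when no state was stored).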
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

1033 | def _outgoing(ui, repo, dest, opts): | 1037 | def _outgoing(ui, repo, dest, opts): |
1034 |     path = ui.paths.getpath(dest, default=('default-push', 'default')) | 1038 |     path = ui.paths.getpath(dest, default=('default-push', 'default')) |
1035 |     if not path: | 1039 |     if not path: |
1036 |         raise error.Abort(_('default repository not configured!'), | 1040 |         raise error.Abort(_('default repository not configured!'), |
1037 |                           hint=_("see 'hg help config.paths'")) | 1041 |                           hint=_("see 'hg help config.paths'")) |
1038 |     dest = path.pushloc or path.loc | 1042 |     dest = path.pushloc or path.loc |
1039 |     branches = path.branch, opts.get('branch') or [] | 1043 |     branches = path.branch, opts.get('branch') or [] |
1040 | | 1044 | |||
1041 |     ui.status(_('comparing with %s\n') % util.hidepassword(dest)) | 1045 |     ui.status(_('comparing with %s\n') % util.hidepassword(dest)) |
1042 |     revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev')) | 1046 |     revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev')) |
1043 |     if revs: | 1047 |     if revs: |
1044 |         revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)] | 1048 |         revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)] |
1045 | | 1049 | |||
1046 |     other = peer(repo, opts, dest) | 1050 |     other = peer(repo, opts, dest) |
1047 |     outgoing = discovery.findcommonoutgoing(repo, other, revs, | 1051 |     outgoing = discovery.findcommonoutgoing(repo, other, revs, |
1048 |                                             force=opts.get('force')) | 1052 |                                             force=opts.get('force')) |
1049 |     o = outgoing.missing | 1053 |     o = outgoing.missing |
1050 |     if not o: | 1054 |     if not o: |
1051 |         scmutil.nochangesfound(repo.ui, repo, outgoing.excluded) | 1055 |         scmutil.nochangesfound(repo.ui, repo, outgoing.excluded) |
1052 |     return o, other | 1056 |     return o, other |
1053 | | 1057 | |||
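_outgoing() resolves its destination with a two-step default: ui.paths.getpath() consults 'default-push' before 'default', which is what lets a repository pull from one URL and push to another. An illustrative sketch, assuming ui is an existing ui object and the [paths] entries shown in the comments are hypothetical:

    # [paths]
    # default = https://hg.example.com/repo
    # default-push = ssh://hg.example.com//srv/repo
    path = ui.paths.getpath(None, default=('default-push', 'default'))
    if path:
        # pushloc is set when 'default-push' matched; loc is the fallback
        print(path.pushloc or path.loc)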
1054 | def outgoing(ui, repo, dest, opts): | 1058 | def outgoing(ui, repo, dest, opts): |
1055 |     def recurse(): | 1059 |     def recurse(): |
1056 |         ret = 1 | 1060 |         ret = 1 |
1057 |         if opts.get('subrepos'): | 1061 |         if opts.get('subrepos'): |
1058 |             ctx = repo[None] | 1062 |             ctx = repo[None] |
1059 |             for subpath in sorted(ctx.substate): | 1063 |             for subpath in sorted(ctx.substate): |
1060 |                 sub = ctx.sub(subpath) | 1064 |                 sub = ctx.sub(subpath) |
1061 |                 ret = min(ret, sub.outgoing(ui, dest, opts)) | 1065 |                 ret = min(ret, sub.outgoing(ui, dest, opts)) |
1062 |         return ret | 1066 |         return ret |
1063 | | 1067 | |||
1064 |     limit = logcmdutil.getlimit(opts) | 1068 |     limit = logcmdutil.getlimit(opts) |
1065 |     o, other = _outgoing(ui, repo, dest, opts) | 1069 |     o, other = _outgoing(ui, repo, dest, opts) |
1066 |     if not o: | 1070 |     if not o: |
1067 |         cmdutil.outgoinghooks(ui, repo, other, opts, o) | 1071 |         cmdutil.outgoinghooks(ui, repo, other, opts, o) |
1068 |         return recurse() | 1072 |         return recurse() |
1069 | | 1073 | |||
1070 |     if opts.get('newest_first'): | 1074 |     if opts.get('newest_first'): |
1071 |         o.reverse() | 1075 |         o.reverse() |
1072 |     ui.pager('outgoing') | 1076 |     ui.pager('outgoing') |
1073 |     displayer = logcmdutil.changesetdisplayer(ui, repo, opts) | 1077 |     displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
1074 |     count = 0 | 1078 |     count = 0 |
1075 |     for n in o: | 1079 |     for n in o: |
1076 |         if limit is not None and count >= limit: | 1080 |         if limit is not None and count >= limit: |
1077 |             break | 1081 |             break |
1078 |         parents = [p for p in repo.changelog.parents(n) if p != nullid] | 1082 |         parents = [p for p in repo.changelog.parents(n) if p != nullid] |
1079 |         if opts.get('no_merges') and len(parents) == 2: | 1083 |         if opts.get('no_merges') and len(parents) == 2: |
1080 |             continue | 1084 |             continue |
1081 |         count += 1 | 1085 |         count += 1 |
1082 |         displayer.show(repo[n]) | 1086 |         displayer.show(repo[n]) |
1083 |     displayer.close() | 1087 |     displayer.close() |
1084 |     cmdutil.outgoinghooks(ui, repo, other, opts, o) | 1088 |     cmdutil.outgoinghooks(ui, repo, other, opts, o) |
1085 |     recurse() | 1089 |     recurse() |
1086 |     return 0 # exit code is zero since we found outgoing changes | 1090 |     return 0 # exit code is zero since we found outgoing changes |
1087 | | 1091 | |||
1088 | def verify(repo): | 1092 | def verify(repo): |
1089 |     """verify the consistency of a repository""" | 1093 |     """verify the consistency of a repository""" |
1090 |     ret = verifymod.verify(repo) | 1094 |     ret = verifymod.verify(repo) |
1091 | | 1095 | |||
1092 |     # Broken subrepo references in hidden csets don't seem worth worrying about, | 1096 |     # Broken subrepo references in hidden csets don't seem worth worrying about, |
1093 |     # since they can't be pushed/pulled, and --hidden can be used if they are a | 1097 |     # since they can't be pushed/pulled, and --hidden can be used if they are a |
1094 |     # concern. | 1098 |     # concern. |
1095 | | 1099 | |||
1096 |     # pathto() is needed for -R case | 1100 |     # pathto() is needed for -R case |
1097 |     revs = repo.revs("filelog(%s)", | 1101 |     revs = repo.revs("filelog(%s)", |
1098 |                      util.pathto(repo.root, repo.getcwd(), '.hgsubstate')) | 1102 |                      util.pathto(repo.root, repo.getcwd(), '.hgsubstate')) |
1099 | | 1103 | |||
1100 |     if revs: | 1104 |     if revs: |
1101 |         repo.ui.status(_('checking subrepo links\n')) | 1105 |         repo.ui.status(_('checking subrepo links\n')) |
1102 |         for rev in revs: | 1106 |         for rev in revs: |
1103 |             ctx = repo[rev] | 1107 |             ctx = repo[rev] |
1104 |             try: | 1108 |             try: |
1105 |                 for subpath in ctx.substate: | 1109 |                 for subpath in ctx.substate: |
1106 |                     try: | 1110 |                     try: |
1107 |                         ret = (ctx.sub(subpath, allowcreate=False).verify() | 1111 |                         ret = (ctx.sub(subpath, allowcreate=False).verify() |
1108 |                                or ret) | 1112 |                                or ret) |
1109 |                     except error.RepoError as e: | 1113 |                     except error.RepoError as e: |
1110 |                         repo.ui.warn(('%d: %s\n') % (rev, e)) | 1114 |                         repo.ui.warn(('%d: %s\n') % (rev, e)) |
1111 |             except Exception: | 1115 |             except Exception: |
1112 |                 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') % | 1116 |                 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') % |
1113 |                              node.short(ctx.node())) | 1117 |                              node.short(ctx.node())) |
1114 | | 1118 | |||
1115 |     return ret | 1119 |     return ret |
1116 | | 1120 | |||
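The revset call in verify() is a useful pattern in its own right: repo.revs() interpolates %s arguments safely, and the filelog() predicate enumerates every revision that touched the given file. A small hedged sketch against an arbitrary tracked file; the path is a placeholder:

    # assumes 'repo' is an open localrepository
    for rev in repo.revs("filelog(%s)", 'subdir/somefile.txt'):
        print(rev)  # revision numbers whose filelog touched the path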
1117 | def remoteui(src, opts): | 1121 | def remoteui(src, opts): |
1118 |     'build a remote ui from ui or repo and opts' | 1122 |     'build a remote ui from ui or repo and opts' |
1119 |     if util.safehasattr(src, 'baseui'): # looks like a repository | 1123 |     if util.safehasattr(src, 'baseui'): # looks like a repository |
1120 |         dst = src.baseui.copy() # drop repo-specific config | 1124 |         dst = src.baseui.copy() # drop repo-specific config |
1121 |         src = src.ui # copy target options from repo | 1125 |         src = src.ui # copy target options from repo |
1122 |     else: # assume it's a global ui object | 1126 |     else: # assume it's a global ui object |
1123 |         dst = src.copy() # keep all global options | 1127 |         dst = src.copy() # keep all global options |
1124 | | 1128 | |||
1125 |     # copy ssh-specific options | 1129 |     # copy ssh-specific options |
1126 |     for o in 'ssh', 'remotecmd': | 1130 |     for o in 'ssh', 'remotecmd': |
1127 |         v = opts.get(o) or src.config('ui', o) | 1131 |         v = opts.get(o) or src.config('ui', o) |
1128 |         if v: | 1132 |         if v: |
1129 |             dst.setconfig("ui", o, v, 'copied') | 1133 |             dst.setconfig("ui", o, v, 'copied') |
1130 | | 1134 | |||
1131 |     # copy bundle-specific options | 1135 |     # copy bundle-specific options |
1132 |     r = src.config('bundle', 'mainreporoot') | 1136 |     r = src.config('bundle', 'mainreporoot') |
1133 |     if r: | 1137 |     if r: |
1134 |         dst.setconfig('bundle', 'mainreporoot', r, 'copied') | 1138 |         dst.setconfig('bundle', 'mainreporoot', r, 'copied') |
1135 | | 1139 | |||
1136 |     # copy selected local settings to the remote ui | 1140 |     # copy selected local settings to the remote ui |
1137 |     for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'): | 1141 |     for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'): |
1138 |         for key, val in src.configitems(sect): | 1142 |         for key, val in src.configitems(sect): |
1139 |             dst.setconfig(sect, key, val, 'copied') | 1143 |             dst.setconfig(sect, key, val, 'copied') |
1140 |     v = src.config('web', 'cacerts') | 1144 |     v = src.config('web', 'cacerts') |
1141 |     if v: | 1145 |     if v: |
1142 |         dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied') | 1146 |         dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied') |
1143 | | 1147 | |||
1144 |     return dst | 1148 |     return dst |
1145 | | 1149 | |||
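Precedence in remoteui() runs command-line options first, then the source ui's config (v = opts.get(o) or src.config('ui', o)); the 'copied' string passed to setconfig() is only a provenance label. A quick hedged check of that precedence; 'ssh -C' is an arbitrary example value:

    dst = remoteui(repo, {'ssh': 'ssh -C'})   # --ssh beats ui.ssh from hgrc
    assert dst.config('ui', 'ssh') == 'ssh -C'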
1146 | # Files of interest | 1150 | # Files of interest |
1147 | # Used to check if the repository has changed looking at mtime and size of | 1151 | # Used to check if the repository has changed looking at mtime and size of |
1148 | # these files. | 1152 | # these files. |
1149 | foi = [('spath', '00changelog.i'), | 1153 | foi = [('spath', '00changelog.i'), |
1150 |        ('spath', 'phaseroots'), # ! phase can change content at the same size | 1154 |        ('spath', 'phaseroots'), # ! phase can change content at the same size |
1151 |        ('spath', 'obsstore'), | 1155 |        ('spath', 'obsstore'), |
1152 |        ('path', 'bookmarks'), # ! bookmark can change content at the same size | 1156 |        ('path', 'bookmarks'), # ! bookmark can change content at the same size |
1153 |       ] | 1157 |       ] |
1154 | | 1158 | |||
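The foi list drives a cheap change fingerprint: stat each file of interest, record its (mtime, size) pair, and treat any later difference as "the repository changed". The '!' comments mark the known blind spot, since phases and bookmarks can rewrite content without changing size. An illustrative standalone version of the idea; the watched paths are placeholders:

    import os
    import stat

    def snapshot(paths):
        # (mtime, size) per file; coarse by design, see the caveats above
        state = []
        for p in paths:
            st = os.stat(p)
            state.append((st[stat.ST_MTIME], st.st_size))
        return tuple(state)

    watched = ['.hg/store/00changelog.i', '.hg/bookmarks']
    before = snapshot(watched)
    # ... later ...
    if snapshot(watched) != before:
        print('repository probably changed')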
1155 | class cachedlocalrepo(object): | 1159 | class cachedlocalrepo(object): |
1156 |     """Holds a localrepository that can be cached and reused.""" | 1160 |     """Holds a localrepository that can be cached and reused.""" |
1157 | | 1161 | |||
1158 |     def __init__(self, repo): | 1162 |     def __init__(self, repo): |
1159 |         """Create a new cached repo from an existing repo. | 1163 |         """Create a new cached repo from an existing repo. |
1160 | | 1164 | |||
1161 |         We assume the passed in repo was recently created. If the | 1165 |         We assume the passed in repo was recently created. If the |
1162 |         repo has changed between when it was created and when it was | 1166 |         repo has changed between when it was created and when it was |
1163 |         turned into a cache, it may not refresh properly. | 1167 |         turned into a cache, it may not refresh properly. |
1164 |         """ | 1168 |         """ |
1165 |         assert isinstance(repo, localrepo.localrepository) | 1169 |         assert isinstance(repo, localrepo.localrepository) |
1166 |         self._repo = repo | 1170 |         self._repo = repo |
1167 |         self._state, self.mtime = self._repostate() | 1171 |         self._state, self.mtime = self._repostate() |
1168 |         self._filtername = repo.filtername | 1172 |         self._filtername = repo.filtername |
1169 | | 1173 | |||
1170 |     def fetch(self): | 1174 |     def fetch(self): |
1171 |         """Refresh (if necessary) and return a repository. | 1175 |         """Refresh (if necessary) and return a repository. |
1172 | | 1176 | |||
1173 |         If the cached instance is out of date, it will be recreated | 1177 |         If the cached instance is out of date, it will be recreated |
1174 |         automatically and returned. | 1178 |         automatically and returned. |
1175 | | 1179 | |||
1176 |         Returns a tuple of the repo and a boolean indicating whether a new | 1180 |         Returns a tuple of the repo and a boolean indicating whether a new |
1177 |         repo instance was created. | 1181 |         repo instance was created. |
1178 |         """ | 1182 |         """ |
1179 |         # We compare the mtimes and sizes of some well-known files to | 1183 |         # We compare the mtimes and sizes of some well-known files to |
1180 |         # determine if the repo changed. This is not precise, as mtimes | 1184 |         # determine if the repo changed. This is not precise, as mtimes |
1181 |         # are susceptible to clock skew and imprecise filesystems and | 1185 |         # are susceptible to clock skew and imprecise filesystems and |
1182 |         # file content can change while maintaining the same size. | 1186 |         # file content can change while maintaining the same size. |
1183 | | 1187 | |||
1184 |         state, mtime = self._repostate() | 1188 |         state, mtime = self._repostate() |
1185 |         if state == self._state: | 1189 |         if state == self._state: |
1186 |             return self._repo, False | 1190 |             return self._repo, False |
1187 | | 1191 | |||
1188 |         repo = repository(self._repo.baseui, self._repo.url()) | 1192 |         repo = repository(self._repo.baseui, self._repo.url()) |
1189 |         if self._filtername: | 1193 |         if self._filtername: |
1190 |             self._repo = repo.filtered(self._filtername) | 1194 |             self._repo = repo.filtered(self._filtername) |
1191 |         else: | 1195 |         else: |
1192 |             self._repo = repo.unfiltered() | 1196 |             self._repo = repo.unfiltered() |
1193 |         self._state = state | 1197 |         self._state = state |
1194 |         self.mtime = mtime | 1198 |         self.mtime = mtime |
1195 | | 1199 | |||
1196 |         return self._repo, True | 1200 |         return self._repo, True |
1197 | | 1201 | |||
1198 |     def _repostate(self): | 1202 |     def _repostate(self): |
1199 |         state = [] | 1203 |         state = [] |
1200 |         maxmtime = -1 | 1204 |         maxmtime = -1 |
1201 |         for attr, fname in foi: | 1205 |         for attr, fname in foi: |
1202 |             prefix = getattr(self._repo, attr) | 1206 |             prefix = getattr(self._repo, attr) |
1203 |             p = os.path.join(prefix, fname) | 1207 |             p = os.path.join(prefix, fname) |
1204 |             try: | 1208 |             try: |
1205 |                 st = os.stat(p) | 1209 |                 st = os.stat(p) |
1206 |             except OSError: | 1210 |             except OSError: |
1207 |                 st = os.stat(prefix) | 1211 |                 st = os.stat(prefix) |
1208 |             state.append((st[stat.ST_MTIME], st.st_size)) | 1212 |             state.append((st[stat.ST_MTIME], st.st_size)) |
1209 |             maxmtime = max(maxmtime, st[stat.ST_MTIME]) | 1213 |             maxmtime = max(maxmtime, st[stat.ST_MTIME]) |
1210 | | 1214 | |||
1211 |         return tuple(state), maxmtime | 1215 |         return tuple(state), maxmtime |
1212 | | 1216 | |||
1213 |     def copy(self): | 1217 |     def copy(self): |
1214 |         """Obtain a copy of this class instance. | 1218 |         """Obtain a copy of this class instance. |
1215 | | 1219 | |||
1216 |         A new localrepository instance is obtained. The new instance should be | 1220 |         A new localrepository instance is obtained. The new instance should be |
1217 |         completely independent of the original. | 1221 |         completely independent of the original. |
1218 |         """ | 1222 |         """ |
1219 |         repo = repository(self._repo.baseui, self._repo.origroot) | 1223 |         repo = repository(self._repo.baseui, self._repo.origroot) |
1220 |         if self._filtername: | 1224 |         if self._filtername: |
1221 |             repo = repo.filtered(self._filtername) | 1225 |             repo = repo.filtered(self._filtername) |
1222 |         else: | 1226 |         else: |
1223 |             repo = repo.unfiltered() | 1227 |             repo = repo.unfiltered() |
1224 |         c = cachedlocalrepo(repo) | 1228 |         c = cachedlocalrepo(repo) |
1225 |         c._state = self._state | 1229 |         c._state = self._state |
1226 |         c.mtime = self.mtime | 1230 |         c.mtime = self.mtime |
1227 |         return c | 1231 |         return c |
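cachedlocalrepo exists for long-running processes (the hgweb machinery is the natural consumer) that want to reuse a repository object across requests and pay the reload cost only when the on-disk state has actually moved. A hedged usage sketch; next_request() and handle() are hypothetical stand-ins for a server loop:

    cache = cachedlocalrepo(repo)      # wrap a freshly created localrepository
    while True:
        req = next_request()           # hypothetical
        r, recreated = cache.fetch()   # cheap stat check; reloads if stale
        handle(req, r)                 # hypothetical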