@@ -1,1166 +1,1165 @@
# absorb.py
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""apply working directory changes to changesets (EXPERIMENTAL)

The absorb extension provides a command to use annotate information to
amend modified chunks into the corresponding non-public changesets.

::

    [absorb]
    # only check 50 recent non-public changesets at most
    max-stack-size = 50
    # whether to add noise to new commits to avoid obsolescence cycle
    add-noise = 1
    # make `amend --correlated` a shortcut to the main command
    amend-flag = correlated

    [color]
    absorb.description = yellow
    absorb.node = blue bold
    absorb.path = bold
"""

# TODO:
# * Rename config items to [commands] namespace
# * Converge getdraftstack() with other code in core
# * move many attributes on fixupstate to be private

from __future__ import absolute_import

import collections

from mercurial.i18n import _
from mercurial.node import (
    hex,
-    nullid,
    short,
)
from mercurial import (
    cmdutil,
    commands,
    context,
    crecord,
    error,
    linelog,
    mdiff,
    obsolete,
    patch,
    phases,
    pycompat,
    registrar,
    rewriteutil,
    scmutil,
    util,
)
from mercurial.utils import stringutil

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(b'absorb', b'add-noise', default=True)
configitem(b'absorb', b'amend-flag', default=None)
configitem(b'absorb', b'max-stack-size', default=50)

colortable = {
    b'absorb.description': b'yellow',
    b'absorb.node': b'blue bold',
    b'absorb.path': b'bold',
}

defaultdict = collections.defaultdict

class nullui(object):
    """blank ui object doing nothing"""

    debugflag = False
    verbose = False
    quiet = True

    def __getitem__(self, name):
        def nullfunc(*args, **kwds):
            return

        return nullfunc

class emptyfilecontext(object):
    """minimal filecontext representing an empty file"""

    def __init__(self, repo):
        self._repo = repo

    def data(self):
        return b''

    def node(self):
-        return nullid
+        return self._repo.nullid

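# Editor's note on the hunk above (illustration, not upstream code): newer
# Mercurial drops the module-level mercurial.node.nullid constant, so the
# empty filecontext now asks its repository for the null node:
#
#     emptyfilecontext(repo).node() == repo.nullid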
def uniq(lst):
    """list -> list. remove duplicated items without changing the order"""
    seen = set()
    result = []
    for x in lst:
        if x not in seen:
            seen.add(x)
            result.append(x)
    return result

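# Editor's illustration (not upstream code): uniq keeps the first occurrence
# of each item and preserves the original order.
def _example_uniq():
    assert uniq([3, 1, 3, 2, 1]) == [3, 1, 2]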

def getdraftstack(headctx, limit=None):
    """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.

    changesets are sorted in topo order, oldest first.
    return at most limit items, if limit is a positive number.

    merges are considered as non-draft as well. i.e. every commit
    returned has and only has 1 parent.
    """
    ctx = headctx
    result = []
    while ctx.phase() != phases.public:
        if limit and len(result) >= limit:
            break
        parents = ctx.parents()
        if len(parents) != 1:
            break
        result.append(ctx)
        ctx = parents[0]
    result.reverse()
    return result

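# Editor's illustration (not upstream code): collecting the draft stack below
# the working-copy parent, honoring the same config knob the absorb command
# reads.
def _example_stack(repo):
    limit = repo.ui.configint(b'absorb', b'max-stack-size')
    return getdraftstack(repo[b'.'], limit)  # oldest draft first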

def getfilestack(stack, path, seenfctxs=None):
    """([ctx], str, set) -> [fctx], {ctx: fctx}

    stack is a list of contexts, from old to new. usually they are what
    "getdraftstack" returns.

    follows renames, but not copies.

    seenfctxs is a set of filecontexts that will be considered "immutable".
    they are usually what this function returned in earlier calls, useful
    to avoid issues that a file was "moved" to multiple places and was then
    modified differently, like: "a" was copied to "b", "a" was also copied to
    "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a"
    and we enforce only one of them to be able to affect "a"'s content.

    return an empty list and an empty dict, if the specified path does not
    exist in stack[-1] (the top of the stack).

    otherwise, return a list of de-duplicated filecontexts, and the map to
    convert ctx in the stack to fctx, for possible mutable fctxs. the first item
    of the list would be outside the stack and should be considered immutable.
    the remaining items are within the stack.

    for example, given the following changelog and corresponding filelog
    revisions:

      changelog: 3----4----5----6----7
      filelog:   x    0----1----1----2 (x: no such file yet)

    - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
    - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
      dummy empty filecontext.
    - if stack = [2], returns ([], {})
    - if stack = [7], returns ([1, 2], {7: 2})
    - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
      removed, since 1 is immutable.
    """
    if seenfctxs is None:
        seenfctxs = set()
    assert stack

    if path not in stack[-1]:
        return [], {}

    fctxs = []
    fctxmap = {}

    pctx = stack[0].p1()  # the public (immutable) ctx we stop at
    for ctx in reversed(stack):
        if path not in ctx:  # the file is added in the next commit
            pctx = ctx
            break
        fctx = ctx[path]
        fctxs.append(fctx)
        if fctx in seenfctxs:  # treat fctx as the immutable one
            pctx = None  # do not add another immutable fctx
            break
        fctxmap[ctx] = fctx  # only for mutable fctxs
        copy = fctx.copysource()
        if copy:
            path = copy  # follow rename
            if path in ctx:  # but do not follow copy
                pctx = ctx.p1()
                break

    if pctx is not None:  # need an extra immutable fctx
        if path in pctx:
            fctxs.append(pctx[path])
        else:
            fctxs.append(emptyfilecontext(pctx.repo()))

    fctxs.reverse()
    # note: we rely on a property of hg: filerev is not reused for linear
    # history. i.e. it's impossible to have:
    #   changelog: 4----5----6 (linear, no merges)
    #   filelog:   1----2----1
    #                        ^ reuse filerev (impossible)
    # because parents are part of the hash. if that's not true, we need to
    # remove uniq and find a different way to identify fctxs.
    return uniq(fctxs), fctxmap


class overlaystore(patch.filestore):
    """read-only, hybrid store based on a dict and ctx.
    memworkingcopy: {path: content}, overrides file contents.
    """

    def __init__(self, basectx, memworkingcopy):
        self.basectx = basectx
        self.memworkingcopy = memworkingcopy

    def getfile(self, path):
        """comply with mercurial.patch.filestore.getfile"""
        if path not in self.basectx:
            return None, None, None
        fctx = self.basectx[path]
        if path in self.memworkingcopy:
            content = self.memworkingcopy[path]
        else:
            content = fctx.data()
        mode = (fctx.islink(), fctx.isexec())
        copy = fctx.copysource()
        return content, mode, copy


def overlaycontext(memworkingcopy, ctx, parents=None, extra=None, desc=None):
    """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
    memworkingcopy overrides file contents.
    """
    # parents must contain 2 items: (node1, node2)
    if parents is None:
        parents = ctx.repo().changelog.parents(ctx.node())
    if extra is None:
        extra = ctx.extra()
    if desc is None:
        desc = ctx.description()
    date = ctx.date()
    user = ctx.user()
    files = set(ctx.files()).union(memworkingcopy)
    store = overlaystore(ctx, memworkingcopy)
    return context.memctx(
        repo=ctx.repo(),
        parents=parents,
        text=desc,
        files=files,
        filectxfn=store,
        user=user,
        date=date,
        branch=None,
        extra=extra,
    )

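# Editor's illustration (not upstream code): rewriting one file of a changeset
# entirely in memory. The path and content are hypothetical; committing the
# returned memctx yields the replacement node, much as the fixupstate commit
# path does later in this file.
def _example_amend_in_memory(ctx):
    memworkingcopy = {b'a.txt': b'new content\n'}
    mctx = overlaycontext(memworkingcopy, ctx)
    return mctx.commit()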

class filefixupstate(object):
    """state needed to apply fixups to a single file

    internally, it keeps file contents of several revisions and a linelog.

    the linelog uses odd revision numbers for original contents (fctxs passed
    to __init__), and even revision numbers for fixups, like:

        linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
        linelog rev 2: fixups made to self.fctxs[0]
        linelog rev 3: self.fctxs[1] (a child of fctxs[0])
        linelog rev 4: fixups made to self.fctxs[1]
        ...

    a typical use is like:

    1. call diffwith, to calculate self.fixups
    2. (optionally), present self.fixups to the user, or change it
    3. call apply, to apply changes
    4. read results from "finalcontents", or call getfinalcontent
    """

    def __init__(self, fctxs, path, ui=None, opts=None):
        """([fctx], ui or None) -> None

        fctxs should be linear, and sorted by topo order - oldest first.
        fctxs[0] will be considered as "immutable" and will not be changed.
        """
        self.fctxs = fctxs
        self.path = path
        self.ui = ui or nullui()
        self.opts = opts or {}

        # following fields are built from fctxs. they exist for perf reason
        self.contents = [f.data() for f in fctxs]
        self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
        self.linelog = self._buildlinelog()
        if self.ui.debugflag:
            assert self._checkoutlinelog() == self.contents

        # following fields will be filled later
        self.chunkstats = [0, 0]  # [adopted, total : int]
        self.targetlines = []  # [str]
        self.fixups = []  # [(linelog rev, a1, a2, b1, b2)]
        self.finalcontents = []  # [str]
        self.ctxaffected = set()

    def diffwith(self, targetfctx, fm=None):
        """calculate fixups needed by examining the differences between
        self.fctxs[-1] and targetfctx, chunk by chunk.

        targetfctx is the target state we move towards. we may or may not be
        able to get there because not all modified chunks can be amended into
        a non-public fctx unambiguously.

        call this only once, before apply().

        update self.fixups, self.chunkstats, and self.targetlines.
        """
        a = self.contents[-1]
        alines = self.contentlines[-1]
        b = targetfctx.data()
        blines = mdiff.splitnewlines(b)
        self.targetlines = blines

        self.linelog.annotate(self.linelog.maxrev)
        annotated = self.linelog.annotateresult  # [(linelog rev, linenum)]
        assert len(annotated) == len(alines)
        # add a dummy end line to make insertion at the end easier
        if annotated:
            dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
            annotated.append(dummyendline)

        # analyse diff blocks
        for chunk in self._alldiffchunks(a, b, alines, blines):
            newfixups = self._analysediffchunk(chunk, annotated)
            self.chunkstats[0] += bool(newfixups)  # 1 or 0
            self.chunkstats[1] += 1
            self.fixups += newfixups
            if fm is not None:
                self._showchanges(fm, alines, blines, chunk, newfixups)

    def apply(self):
        """apply self.fixups. update self.linelog, self.finalcontents.

        call this only once, before getfinalcontent(), after diffwith().
        """
        # the following is unnecessary, as it's done by "diffwith":
        # self.linelog.annotate(self.linelog.maxrev)
        for rev, a1, a2, b1, b2 in reversed(self.fixups):
            blines = self.targetlines[b1:b2]
            if self.ui.debugflag:
                idx = (max(rev - 1, 0)) // 2
                self.ui.write(
                    _(b'%s: chunk %d:%d -> %d lines\n')
                    % (short(self.fctxs[idx].node()), a1, a2, len(blines))
                )
            self.linelog.replacelines(rev, a1, a2, b1, b2)
        if self.opts.get(b'edit_lines', False):
            self.finalcontents = self._checkoutlinelogwithedits()
        else:
            self.finalcontents = self._checkoutlinelog()

    def getfinalcontent(self, fctx):
        """(fctx) -> str. get modified file content for a given filecontext"""
        idx = self.fctxs.index(fctx)
        return self.finalcontents[idx]

    def _analysediffchunk(self, chunk, annotated):
        """analyse a diff chunk and return new fixups found

        return [] if no lines from the chunk can be safely applied.

        the chunk (or lines) cannot be safely applied, if, for example:
        - the modified (deleted) lines belong to a public changeset
          (self.fctxs[0])
        - the chunk is a pure insertion and the adjacent lines (at most 2
          lines) belong to different non-public changesets, or do not belong
          to any non-public changesets.
        - the chunk is modifying lines from different changesets.
          in this case, if the number of lines deleted equals to the number
          of lines added, assume it's a simple 1:1 map (could be wrong).
          otherwise, give up.
        - the chunk is modifying lines from a single non-public changeset,
          but other revisions touch the area as well. i.e. the lines are
          not continuous as seen from the linelog.
        """
        a1, a2, b1, b2 = chunk
        # find involved indexes from annotate result
        involved = annotated[a1:a2]
        if not involved and annotated:  # a1 == a2 and a is not empty
            # pure insertion, check nearby lines. ignore lines belonging
            # to the public (first) changeset (i.e. annotated[i][0] == 1)
            nearbylinenums = {a2, max(0, a1 - 1)}
            involved = [
                annotated[i] for i in nearbylinenums if annotated[i][0] != 1
            ]
        involvedrevs = list({r for r, l in involved})
        newfixups = []
        if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
            # chunk belongs to a single revision
            rev = involvedrevs[0]
            if rev > 1:
                fixuprev = rev + 1
                newfixups.append((fixuprev, a1, a2, b1, b2))
        elif a2 - a1 == b2 - b1 or b1 == b2:
            # 1:1 line mapping, or chunk was deleted
            for i in pycompat.xrange(a1, a2):
                rev, linenum = annotated[i]
                if rev > 1:
                    if b1 == b2:  # deletion, simply remove that single line
                        nb1 = nb2 = 0
                    else:  # 1:1 line mapping, change the corresponding rev
                        nb1 = b1 + i - a1
                        nb2 = nb1 + 1
                    fixuprev = rev + 1
                    newfixups.append((fixuprev, i, i + 1, nb1, nb2))
        return self._optimizefixups(newfixups)

    @staticmethod
    def _alldiffchunks(a, b, alines, blines):
        """like mdiff.allblocks, but only care about differences"""
        blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
        for chunk, btype in blocks:
            if btype != b'!':
                continue
            yield chunk

    def _buildlinelog(self):
        """calculate the initial linelog based on self.content{,line}s.
        this is similar to running a partial "annotate".
        """
        llog = linelog.linelog()
        a, alines = b'', []
        for i in pycompat.xrange(len(self.contents)):
            b, blines = self.contents[i], self.contentlines[i]
            llrev = i * 2 + 1
            chunks = self._alldiffchunks(a, b, alines, blines)
            for a1, a2, b1, b2 in reversed(list(chunks)):
                llog.replacelines(llrev, a1, a2, b1, b2)
            a, alines = b, blines
        return llog

    def _checkoutlinelog(self):
        """() -> [str]. check out file contents from linelog"""
        contents = []
        for i in pycompat.xrange(len(self.contents)):
            rev = (i + 1) * 2
            self.linelog.annotate(rev)
            content = b''.join(map(self._getline, self.linelog.annotateresult))
            contents.append(content)
        return contents

    def _checkoutlinelogwithedits(self):
        """() -> [str]. prompt all lines for edit"""
        alllines = self.linelog.getalllines()
        # header
        editortext = (
            _(
                b'HG: editing %s\nHG: "y" means the line to the right '
                b'exists in the changeset to the top\nHG:\n'
            )
            % self.fctxs[-1].path()
        )
        # [(idx, fctx)]. hide the dummy emptyfilecontext
        visiblefctxs = [
            (i, f)
            for i, f in enumerate(self.fctxs)
            if not isinstance(f, emptyfilecontext)
        ]
        for i, (j, f) in enumerate(visiblefctxs):
            editortext += _(b'HG: %s/%s %s %s\n') % (
                b'|' * i,
                b'-' * (len(visiblefctxs) - i + 1),
                short(f.node()),
                f.description().split(b'\n', 1)[0],
            )
        editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs))
        # figure out the lifetime of a line, this is relatively inefficient,
        # but probably fine
        lineset = defaultdict(lambda: set())  # {(llrev, linenum): {llrev}}
        for i, f in visiblefctxs:
            self.linelog.annotate((i + 1) * 2)
            for l in self.linelog.annotateresult:
                lineset[l].add(i)
        # append lines
        for l in alllines:
            editortext += b'    %s : %s' % (
                b''.join(
                    [
                        (b'y' if i in lineset[l] else b' ')
                        for i, _f in visiblefctxs
                    ]
                ),
                self._getline(l),
            )
        # run editor
        editedtext = self.ui.edit(editortext, b'', action=b'absorb')
        if not editedtext:
            raise error.InputError(_(b'empty editor text'))
        # parse edited result
        contents = [b''] * len(self.fctxs)
        leftpadpos = 4
        colonpos = leftpadpos + len(visiblefctxs) + 1
        for l in mdiff.splitnewlines(editedtext):
            if l.startswith(b'HG:'):
                continue
            if l[colonpos - 1 : colonpos + 2] != b' : ':
                raise error.InputError(_(b'malformed line: %s') % l)
            linecontent = l[colonpos + 2 :]
            for i, ch in enumerate(
                pycompat.bytestr(l[leftpadpos : colonpos - 1])
            ):
                if ch == b'y':
                    contents[visiblefctxs[i][0]] += linecontent
        # chunkstats is hard to calculate if anything changes, therefore
        # set them to just a simple value (1, 1).
        if editedtext != editortext:
            self.chunkstats = [1, 1]
        return contents

    def _getline(self, lineinfo):
        """((rev, linenum)) -> str. convert rev+line number to line content"""
        rev, linenum = lineinfo
        if rev & 1:  # odd: original line taken from fctxs
            return self.contentlines[rev // 2][linenum]
        else:  # even: fixup line from targetfctx
            return self.targetlines[linenum]

    def _iscontinuous(self, a1, a2, closedinterval=False):
        """(a1, a2 : int) -> bool

        check if these lines are continuous. i.e. no other insertions or
        deletions (from other revisions) among these lines.

        closedinterval decides whether a2 should be included or not. i.e. is
        it [a1, a2), or [a1, a2] ?
        """
        if a1 >= a2:
            return True
        llog = self.linelog
        offset1 = llog.getoffset(a1)
        offset2 = llog.getoffset(a2) + int(closedinterval)
        linesinbetween = llog.getalllines(offset1, offset2)
        return len(linesinbetween) == a2 - a1 + int(closedinterval)

    def _optimizefixups(self, fixups):
        """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
        merge adjacent fixups to make them less fragmented.
        """
        result = []
        pcurrentchunk = [[-1, -1, -1, -1, -1]]

        def pushchunk():
            if pcurrentchunk[0][0] != -1:
                result.append(tuple(pcurrentchunk[0]))

        for i, chunk in enumerate(fixups):
            rev, a1, a2, b1, b2 = chunk
            lastrev = pcurrentchunk[0][0]
            lasta2 = pcurrentchunk[0][2]
            lastb2 = pcurrentchunk[0][4]
            if (
                a1 == lasta2
                and b1 == lastb2
                and rev == lastrev
                and self._iscontinuous(max(a1 - 1, 0), a1)
            ):
                # merge into currentchunk
                pcurrentchunk[0][2] = a2
                pcurrentchunk[0][4] = b2
            else:
                pushchunk()
                pcurrentchunk[0] = list(chunk)
        pushchunk()
        return result

    def _showchanges(self, fm, alines, blines, chunk, fixups):
        def trim(line):
            if line.endswith(b'\n'):
                line = line[:-1]
            return line

        # this is not optimized for perf but _showchanges only gets executed
        # with an extra command-line flag.
        a1, a2, b1, b2 = chunk
        aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
        for idx, fa1, fa2, fb1, fb2 in fixups:
            for i in pycompat.xrange(fa1, fa2):
                aidxs[i - a1] = (max(idx, 1) - 1) // 2
            for i in pycompat.xrange(fb1, fb2):
                bidxs[i - b1] = (max(idx, 1) - 1) // 2

        fm.startitem()
        fm.write(
            b'hunk',
            b' %s\n',
            b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1),
            label=b'diff.hunk',
        )
        fm.data(path=self.path, linetype=b'hunk')

        def writeline(idx, diffchar, line, linetype, linelabel):
            fm.startitem()
            node = b''
            if idx:
                ctx = self.fctxs[idx]
                fm.context(fctx=ctx)
                node = ctx.hex()
                self.ctxaffected.add(ctx.changectx())
            fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node')
            fm.write(
                b'diffchar ' + linetype,
                b'%s%s\n',
                diffchar,
                line,
                label=linelabel,
            )
            fm.data(path=self.path, linetype=linetype)

        for i in pycompat.xrange(a1, a2):
            writeline(
                aidxs[i - a1],
                b'-',
                trim(alines[i]),
                b'deleted',
                b'diff.deleted',
            )
        for i in pycompat.xrange(b1, b2):
            writeline(
                bidxs[i - b1],
                b'+',
                trim(blines[i]),
                b'inserted',
                b'diff.inserted',
            )

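# Editor's illustration (not upstream code): the "typical use" steps from
# filefixupstate's docstring driven by hand. `fctxs` is what getfilestack()
# returns for `path`, and `targetfctx` is the working-copy file context whose
# changes should be absorbed.
def _example_filefixup(fctxs, path, targetfctx):
    state = filefixupstate(fctxs, path)
    state.diffwith(targetfctx)  # 1. compute state.fixups chunk by chunk
    # 2. (optionally) present or edit state.fixups here
    state.apply()  # 3. fold the fixups into the linelog
    # 4. read back the rewritten content for each mutable fctx
    return [state.getfinalcontent(f) for f in fctxs[1:]]
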
class fixupstate(object):
    """state needed to run absorb

    internally, it keeps paths and filefixupstates.

    a typical use is like filefixupstates:

    1. call diffwith, to calculate fixups
    2. (optionally), present fixups to the user, or edit fixups
    3. call apply, to apply changes to memory
    4. call commit, to commit changes to hg database
    """

    def __init__(self, stack, ui=None, opts=None):
        """([ctx], ui or None) -> None

        stack: should be linear, and sorted by topo order - oldest first.
        all commits in stack are considered mutable.
        """
        assert stack
        self.ui = ui or nullui()
        self.opts = opts or {}
        self.stack = stack
        self.repo = stack[-1].repo().unfiltered()

        # following fields will be filled later
        self.paths = []  # [str]
        self.status = None  # ctx.status output
        self.fctxmap = {}  # {path: {ctx: fctx}}
        self.fixupmap = {}  # {path: filefixupstate}
        self.replacemap = {}  # {oldnode: newnode or None}
        self.finalnode = None  # head after all fixups
        self.ctxaffected = set()  # ctx that will be absorbed into

    def diffwith(self, targetctx, match=None, fm=None):
        """diff and prepare fixups. update self.fixupmap, self.paths"""
        # only care about modified files
        self.status = self.stack[-1].status(targetctx, match)
        self.paths = []
        # but if --edit-lines is used, the user may want to edit files
        # even if they are not modified
        editopt = self.opts.get(b'edit_lines')
        if not self.status.modified and editopt and match:
            interestingpaths = match.files()
        else:
            interestingpaths = self.status.modified
        # prepare the filefixupstate
        seenfctxs = set()
        # sorting is necessary to eliminate ambiguity for the "double move"
        # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
        for path in sorted(interestingpaths):
            self.ui.debug(b'calculating fixups for %s\n' % path)
            targetfctx = targetctx[path]
            fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
            # ignore symbolic links or binary, or unchanged files
            if any(
                f.islink() or stringutil.binary(f.data())
                for f in [targetfctx] + fctxs
                if not isinstance(f, emptyfilecontext)
            ):
                continue
            if targetfctx.data() == fctxs[-1].data() and not editopt:
                continue
            seenfctxs.update(fctxs[1:])
            self.fctxmap[path] = ctx2fctx
            fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
            if fm is not None:
                fm.startitem()
                fm.plain(b'showing changes for ')
                fm.write(b'path', b'%s\n', path, label=b'absorb.path')
                fm.data(linetype=b'path')
            fstate.diffwith(targetfctx, fm)
            self.fixupmap[path] = fstate
            self.paths.append(path)
            self.ctxaffected.update(fstate.ctxaffected)

    def apply(self):
        """apply fixups to individual filefixupstates"""
        for path, state in pycompat.iteritems(self.fixupmap):
            if self.ui.debugflag:
                self.ui.write(_(b'applying fixups to %s\n') % path)
            state.apply()

    @property
    def chunkstats(self):
        """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
        return {
            path: state.chunkstats
            for path, state in pycompat.iteritems(self.fixupmap)
        }

    def commit(self):
        """commit changes. update self.finalnode, self.replacemap"""
        with self.repo.transaction(b'absorb') as tr:
            self._commitstack()
            self._movebookmarks(tr)
            if self.repo[b'.'].node() in self.replacemap:
                self._moveworkingdirectoryparent()
            self._cleanupoldcommits()
        return self.finalnode
760 |
|
759 | |||
761 | def printchunkstats(self): |
|
760 | def printchunkstats(self): | |
762 | """print things like '1 of 2 chunk(s) applied'""" |
|
761 | """print things like '1 of 2 chunk(s) applied'""" | |
763 | ui = self.ui |
|
762 | ui = self.ui | |
764 | chunkstats = self.chunkstats |
|
763 | chunkstats = self.chunkstats | |
765 | if ui.verbose: |
|
764 | if ui.verbose: | |
766 | # chunkstats for each file |
|
765 | # chunkstats for each file | |
767 | for path, stat in pycompat.iteritems(chunkstats): |
|
766 | for path, stat in pycompat.iteritems(chunkstats): | |
768 | if stat[0]: |
|
767 | if stat[0]: | |
769 | ui.write( |
|
768 | ui.write( | |
770 | _(b'%s: %d of %d chunk(s) applied\n') |
|
769 | _(b'%s: %d of %d chunk(s) applied\n') | |
771 | % (path, stat[0], stat[1]) |
|
770 | % (path, stat[0], stat[1]) | |
772 | ) |
|
771 | ) | |
773 | elif not ui.quiet: |
|
772 | elif not ui.quiet: | |
774 | # a summary for all files |
|
773 | # a summary for all files | |
775 | stats = chunkstats.values() |
|
774 | stats = chunkstats.values() | |
776 | applied, total = (sum(s[i] for s in stats) for i in (0, 1)) |
|
775 | applied, total = (sum(s[i] for s in stats) for i in (0, 1)) | |
777 | ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total)) |
|
776 | ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total)) | |
778 |
|
777 | |||
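
The verbose branch prints per-file statistics, while the summary branch folds all the per-file (applied, total) pairs into one line. A standalone sketch of that reduction, with hypothetical values:

    chunkstats = {b'a.py': (1, 2), b'b.py': (3, 3)}  # {path: (applied, total)}
    stats = chunkstats.values()
    applied, total = (sum(s[i] for s in stats) for i in (0, 1))
    # prints: 4 of 5 chunk(s) applied
    print((b'%d of %d chunk(s) applied' % (applied, total)).decode())
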
779 | def _commitstack(self): |
|
778 | def _commitstack(self): | |
780 | """make new commits. update self.finalnode, self.replacemap. |
|
779 | """make new commits. update self.finalnode, self.replacemap. | |
781 | it is split from "commit" to avoid too much indentation. 

780 | it is split from "commit" to avoid too much indentation. | 
782 | """ |
|
781 | """ | |
783 | # last node (20-char) committed by us |
|
782 | # last node (20-char) committed by us | |
784 | lastcommitted = None |
|
783 | lastcommitted = None | |
785 | # p1 which overrides the parent of the next commit, "None" means use |
|
784 | # p1 which overrides the parent of the next commit, "None" means use | |
786 | # the original parent unchanged |
|
785 | # the original parent unchanged | |
787 | nextp1 = None |
|
786 | nextp1 = None | |
788 | for ctx in self.stack: |
|
787 | for ctx in self.stack: | |
789 | memworkingcopy = self._getnewfilecontents(ctx) |
|
788 | memworkingcopy = self._getnewfilecontents(ctx) | |
790 | if not memworkingcopy and not lastcommitted: |
|
789 | if not memworkingcopy and not lastcommitted: | |
791 | # nothing changed, nothing committed 

790 | # nothing changed, nothing committed | 
792 | nextp1 = ctx |
|
791 | nextp1 = ctx | |
793 | continue |
|
792 | continue | |
794 | willbecomenoop = ctx.files() and self._willbecomenoop( |
|
793 | willbecomenoop = ctx.files() and self._willbecomenoop( | |
795 | memworkingcopy, ctx, nextp1 |
|
794 | memworkingcopy, ctx, nextp1 | |
796 | ) |
|
795 | ) | |
797 | if self.skip_empty_successor and willbecomenoop: |
|
796 | if self.skip_empty_successor and willbecomenoop: | |
798 | # changeset is no longer necessary |
|
797 | # changeset is no longer necessary | |
799 | self.replacemap[ctx.node()] = None |
|
798 | self.replacemap[ctx.node()] = None | |
800 | msg = _(b'became empty and was dropped') |
|
799 | msg = _(b'became empty and was dropped') | |
801 | else: |
|
800 | else: | |
802 | # changeset needs re-commit |
|
801 | # changeset needs re-commit | |
803 | nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1) |
|
802 | nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1) | |
804 | lastcommitted = self.repo[nodestr] |
|
803 | lastcommitted = self.repo[nodestr] | |
805 | nextp1 = lastcommitted |
|
804 | nextp1 = lastcommitted | |
806 | self.replacemap[ctx.node()] = lastcommitted.node() |
|
805 | self.replacemap[ctx.node()] = lastcommitted.node() | |
807 | if memworkingcopy: |
|
806 | if memworkingcopy: | |
808 | if willbecomenoop: |
|
807 | if willbecomenoop: | |
809 | msg = _(b'%d file(s) changed, became empty as %s') |
|
808 | msg = _(b'%d file(s) changed, became empty as %s') | |
810 | else: |
|
809 | else: | |
811 | msg = _(b'%d file(s) changed, became %s') |
|
810 | msg = _(b'%d file(s) changed, became %s') | |
812 | msg = msg % ( |
|
811 | msg = msg % ( | |
813 | len(memworkingcopy), |
|
812 | len(memworkingcopy), | |
814 | self._ctx2str(lastcommitted), |
|
813 | self._ctx2str(lastcommitted), | |
815 | ) |
|
814 | ) | |
816 | else: |
|
815 | else: | |
817 | msg = _(b'became %s') % self._ctx2str(lastcommitted) |
|
816 | msg = _(b'became %s') % self._ctx2str(lastcommitted) | |
818 | if self.ui.verbose and msg: |
|
817 | if self.ui.verbose and msg: | |
819 | self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg)) |
|
818 | self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg)) | |
820 | self.finalnode = lastcommitted and lastcommitted.node() |
|
819 | self.finalnode = lastcommitted and lastcommitted.node() | |
821 |
|
820 | |||
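
The loop above threads nextp1 through the stack so that each rewritten changeset becomes the parent of the next rewrite, while changesets that become empty map to None in replacemap. A toy model of that chaining (standalone Python, not the Mercurial API; all names are illustrative):

    stack = ['c1', 'c2', 'c3']
    drops = {'c2'}                      # pretend c2 becomes empty
    replacemap, nextp1 = {}, None
    for ctx in stack:
        if ctx in drops:
            replacemap[ctx] = None      # like skip_empty_successor above
            continue
        successor = (ctx, nextp1)       # stand-in for _commitsingle()
        replacemap[ctx] = successor
        nextp1 = successor
    # c3 is re-parented onto the rewrite of c1, skipping the dropped c2
    assert replacemap['c3'] == ('c3', ('c1', None))
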
822 | def _ctx2str(self, ctx): |
|
821 | def _ctx2str(self, ctx): | |
823 | if self.ui.debugflag: |
|
822 | if self.ui.debugflag: | |
824 | return b'%d:%s' % (ctx.rev(), ctx.hex()) |
|
823 | return b'%d:%s' % (ctx.rev(), ctx.hex()) | |
825 | else: |
|
824 | else: | |
826 | return b'%d:%s' % (ctx.rev(), short(ctx.node())) |
|
825 | return b'%d:%s' % (ctx.rev(), short(ctx.node())) | |
827 |
|
826 | |||
828 | def _getnewfilecontents(self, ctx): |
|
827 | def _getnewfilecontents(self, ctx): | |
829 | """(ctx) -> {path: str} |
|
828 | """(ctx) -> {path: str} | |
830 |
|
829 | |||
831 | fetch file contents from filefixupstates. |
|
830 | fetch file contents from filefixupstates. | |
832 | return the working copy overrides - files different from ctx. |
|
831 | return the working copy overrides - files different from ctx. | |
833 | """ |
|
832 | """ | |
834 | result = {} |
|
833 | result = {} | |
835 | for path in self.paths: |
|
834 | for path in self.paths: | |
836 | ctx2fctx = self.fctxmap[path] # {ctx: fctx} |
|
835 | ctx2fctx = self.fctxmap[path] # {ctx: fctx} | |
837 | if ctx not in ctx2fctx: |
|
836 | if ctx not in ctx2fctx: | |
838 | continue |
|
837 | continue | |
839 | fctx = ctx2fctx[ctx] |
|
838 | fctx = ctx2fctx[ctx] | |
840 | content = fctx.data() |
|
839 | content = fctx.data() | |
841 | newcontent = self.fixupmap[path].getfinalcontent(fctx) |
|
840 | newcontent = self.fixupmap[path].getfinalcontent(fctx) | |
842 | if content != newcontent: |
|
841 | if content != newcontent: | |
843 | result[fctx.path()] = newcontent |
|
842 | result[fctx.path()] = newcontent | |
844 | return result |
|
843 | return result | |
845 |
|
844 | |||
846 | def _movebookmarks(self, tr): |
|
845 | def _movebookmarks(self, tr): | |
847 | repo = self.repo |
|
846 | repo = self.repo | |
848 | needupdate = [ |
|
847 | needupdate = [ | |
849 | (name, self.replacemap[hsh]) |
|
848 | (name, self.replacemap[hsh]) | |
850 | for name, hsh in pycompat.iteritems(repo._bookmarks) |
|
849 | for name, hsh in pycompat.iteritems(repo._bookmarks) | |
851 | if hsh in self.replacemap |
|
850 | if hsh in self.replacemap | |
852 | ] |
|
851 | ] | |
853 | changes = [] |
|
852 | changes = [] | |
854 | for name, hsh in needupdate: |
|
853 | for name, hsh in needupdate: | |
855 | if hsh: |
|
854 | if hsh: | |
856 | changes.append((name, hsh)) |
|
855 | changes.append((name, hsh)) | |
857 | if self.ui.verbose: |
|
856 | if self.ui.verbose: | |
858 | self.ui.write( |
|
857 | self.ui.write( | |
859 | _(b'moving bookmark %s to %s\n') % (name, hex(hsh)) |
|
858 | _(b'moving bookmark %s to %s\n') % (name, hex(hsh)) | |
860 | ) |
|
859 | ) | |
861 | else: |
|
860 | else: | |
862 | changes.append((name, None)) |
|
861 | changes.append((name, None)) | |
863 | if self.ui.verbose: |
|
862 | if self.ui.verbose: | |
864 | self.ui.write(_(b'deleting bookmark %s\n') % name) |
|
863 | self.ui.write(_(b'deleting bookmark %s\n') % name) | |
865 | repo._bookmarks.applychanges(repo, tr, changes) |
|
864 | repo._bookmarks.applychanges(repo, tr, changes) | |
866 |
|
865 | |||
867 | def _moveworkingdirectoryparent(self): |
|
866 | def _moveworkingdirectoryparent(self): | |
868 | if not self.finalnode: |
|
867 | if not self.finalnode: | |
869 | # Find the latest not-{obsoleted,stripped} parent. |
|
868 | # Find the latest not-{obsoleted,stripped} parent. | |
870 | revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys()) |
|
869 | revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys()) | |
871 | ctx = self.repo[revs.first()] |
|
870 | ctx = self.repo[revs.first()] | |
872 | self.finalnode = ctx.node() |
|
871 | self.finalnode = ctx.node() | |
873 | else: |
|
872 | else: | |
874 | ctx = self.repo[self.finalnode] |
|
873 | ctx = self.repo[self.finalnode] | |
875 |
|
874 | |||
876 | dirstate = self.repo.dirstate |
|
875 | dirstate = self.repo.dirstate | |
877 | # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to |
|
876 | # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to | |
878 | # be slow. in absorb's case, no need to invalidate fsmonitorstate. |
|
877 | # be slow. in absorb's case, no need to invalidate fsmonitorstate. | |
879 | noop = lambda: 0 |
|
878 | noop = lambda: 0 | |
880 | restore = noop |
|
879 | restore = noop | |
881 | if util.safehasattr(dirstate, '_fsmonitorstate'): |
|
880 | if util.safehasattr(dirstate, '_fsmonitorstate'): | |
882 | bak = dirstate._fsmonitorstate.invalidate |
|
881 | bak = dirstate._fsmonitorstate.invalidate | |
883 |
|
882 | |||
884 | def restore(): |
|
883 | def restore(): | |
885 | dirstate._fsmonitorstate.invalidate = bak |
|
884 | dirstate._fsmonitorstate.invalidate = bak | |
886 |
|
885 | |||
887 | dirstate._fsmonitorstate.invalidate = noop |
|
886 | dirstate._fsmonitorstate.invalidate = noop | |
888 | try: |
|
887 | try: | |
889 | with dirstate.parentchange(): |
|
888 | with dirstate.parentchange(): | |
890 | dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths) |
|
889 | dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths) | |
891 | finally: |
|
890 | finally: | |
892 | restore() |
|
891 | restore() | |
893 |
|
892 | |||
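
The fsmonitor handling above is an ordinary save-override-restore monkeypatch, scoped by try/finally. The same pattern in isolation (hypothetical class, standing in for the fsmonitor state object):

    class _State(object):
        def invalidate(self):
            print('expensive invalidation')

    state = _State()
    saved = state.invalidate
    state.invalidate = lambda: 0        # temporarily a no-op
    try:
        state.invalidate()              # cheap during the critical section
    finally:
        state.invalidate = saved        # original behavior restored
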
894 | @staticmethod |
|
893 | @staticmethod | |
895 | def _willbecomenoop(memworkingcopy, ctx, pctx=None): |
|
894 | def _willbecomenoop(memworkingcopy, ctx, pctx=None): | |
896 | """({path: content}, ctx, ctx) -> bool. test if a commit will be noop |
|
895 | """({path: content}, ctx, ctx) -> bool. test if a commit will be noop | |
897 |
|
896 | |||
898 | if it will become an empty commit (does not change anything, after the |
|
897 | if it will become an empty commit (does not change anything, after the | |
899 | memworkingcopy overrides), return True. otherwise return False. |
|
898 | memworkingcopy overrides), return True. otherwise return False. | |
900 | """ |
|
899 | """ | |
901 | if not pctx: |
|
900 | if not pctx: | |
902 | parents = ctx.parents() |
|
901 | parents = ctx.parents() | |
903 | if len(parents) != 1: |
|
902 | if len(parents) != 1: | |
904 | return False |
|
903 | return False | |
905 | pctx = parents[0] |
|
904 | pctx = parents[0] | |
906 | if ctx.branch() != pctx.branch(): |
|
905 | if ctx.branch() != pctx.branch(): | |
907 | return False |
|
906 | return False | |
908 | if ctx.extra().get(b'close'): |
|
907 | if ctx.extra().get(b'close'): | |
909 | return False |
|
908 | return False | |
910 | # ctx changes more files (not a subset of memworkingcopy) |
|
909 | # ctx changes more files (not a subset of memworkingcopy) | |
911 | if not set(ctx.files()).issubset(set(memworkingcopy)): |
|
910 | if not set(ctx.files()).issubset(set(memworkingcopy)): | |
912 | return False |
|
911 | return False | |
913 | for path, content in pycompat.iteritems(memworkingcopy): |
|
912 | for path, content in pycompat.iteritems(memworkingcopy): | |
914 | if path not in pctx or path not in ctx: |
|
913 | if path not in pctx or path not in ctx: | |
915 | return False |
|
914 | return False | |
916 | fctx = ctx[path] |
|
915 | fctx = ctx[path] | |
917 | pfctx = pctx[path] |
|
916 | pfctx = pctx[path] | |
918 | if pfctx.flags() != fctx.flags(): |
|
917 | if pfctx.flags() != fctx.flags(): | |
919 | return False |
|
918 | return False | |
920 | if pfctx.data() != content: |
|
919 | if pfctx.data() != content: | |
921 | return False |
|
920 | return False | |
922 | return True |
|
921 | return True | |
923 |
|
922 | |||
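
A concrete (hypothetical) reading of those checks: if ctx touched only f.py and the fixups restore f.py to the parent's exact bytes and flags, every test passes and the rewritten changeset would be empty:

    # ctx.files() == [b'f.py']  -> subset of memworkingcopy: ok
    # flags unchanged between pctx[b'f.py'] and ctx[b'f.py']: ok
    memworkingcopy = {b'f.py': b'parent content\n'}
    parent_data = b'parent content\n'   # what pctx[b'f.py'].data() would return
    assert memworkingcopy[b'f.py'] == parent_data  # -> _willbecomenoop: True
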
924 | def _commitsingle(self, memworkingcopy, ctx, p1=None): |
|
923 | def _commitsingle(self, memworkingcopy, ctx, p1=None): | |
925 | """(ctx, {path: content}, node) -> node. make a single commit |
|
924 | """(ctx, {path: content}, node) -> node. make a single commit | |
926 |
|
925 | |||
927 | the commit is a clone from ctx, with an (optionally) different p1, and 

926 | the commit is a clone from ctx, with an (optionally) different p1, and | 
928 | different file contents replaced by memworkingcopy. |
|
927 | different file contents replaced by memworkingcopy. | |
929 | """ |
|
928 | """ | |
930 | parents = p1 and (p1, nullid) |
|
929 | parents = p1 and (p1, self.repo.nullid) | |
931 | extra = ctx.extra() |
|
930 | extra = ctx.extra() | |
932 | if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'): |
|
931 | if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'): | |
933 | extra[b'absorb_source'] = ctx.hex() |
|
932 | extra[b'absorb_source'] = ctx.hex() | |
934 |
|
933 | |||
935 | desc = rewriteutil.update_hash_refs( |
|
934 | desc = rewriteutil.update_hash_refs( | |
936 | ctx.repo(), |
|
935 | ctx.repo(), | |
937 | ctx.description(), |
|
936 | ctx.description(), | |
938 | { |
|
937 | { | |
939 | oldnode: [newnode] |
|
938 | oldnode: [newnode] | |
940 | for oldnode, newnode in self.replacemap.items() |
|
939 | for oldnode, newnode in self.replacemap.items() | |
941 | }, |
|
940 | }, | |
942 | ) |
|
941 | ) | |
943 | mctx = overlaycontext( |
|
942 | mctx = overlaycontext( | |
944 | memworkingcopy, ctx, parents, extra=extra, desc=desc |
|
943 | memworkingcopy, ctx, parents, extra=extra, desc=desc | |
945 | ) |
|
944 | ) | |
946 | return mctx.commit() |
|
945 | return mctx.commit() | |
947 |
|
946 | |||
948 | @util.propertycache |
|
947 | @util.propertycache | |
949 | def _useobsolete(self): |
|
948 | def _useobsolete(self): | |
950 | """() -> bool""" |
|
949 | """() -> bool""" | |
951 | return obsolete.isenabled(self.repo, obsolete.createmarkersopt) |
|
950 | return obsolete.isenabled(self.repo, obsolete.createmarkersopt) | |
952 |
|
951 | |||
953 | def _cleanupoldcommits(self): |
|
952 | def _cleanupoldcommits(self): | |
954 | replacements = { |
|
953 | replacements = { | |
955 | k: ([v] if v is not None else []) |
|
954 | k: ([v] if v is not None else []) | |
956 | for k, v in pycompat.iteritems(self.replacemap) |
|
955 | for k, v in pycompat.iteritems(self.replacemap) | |
957 | } |
|
956 | } | |
958 | if replacements: |
|
957 | if replacements: | |
959 | scmutil.cleanupnodes( |
|
958 | scmutil.cleanupnodes( | |
960 | self.repo, replacements, operation=b'absorb', fixphase=True |
|
959 | self.repo, replacements, operation=b'absorb', fixphase=True | |
961 | ) |
|
960 | ) | |
962 |
|
961 | |||
963 | @util.propertycache |
|
962 | @util.propertycache | |
964 | def skip_empty_successor(self): |
|
963 | def skip_empty_successor(self): | |
965 | return rewriteutil.skip_empty_successor(self.ui, b'absorb') |
|
964 | return rewriteutil.skip_empty_successor(self.ui, b'absorb') | |
966 |
|
965 | |||
967 |
|
966 | |||
968 | def _parsechunk(hunk): |
|
967 | def _parsechunk(hunk): | |
969 | """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))""" |
|
968 | """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))""" | |
970 | if type(hunk) not in (crecord.uihunk, patch.recordhunk): |
|
969 | if type(hunk) not in (crecord.uihunk, patch.recordhunk): | |
971 | return None, None |
|
970 | return None, None | |
972 | path = hunk.header.filename() |
|
971 | path = hunk.header.filename() | |
973 | a1 = hunk.fromline + len(hunk.before) - 1 |
|
972 | a1 = hunk.fromline + len(hunk.before) - 1 | |
974 | # remove before and after context |
|
973 | # remove before and after context | |
975 | hunk.before = hunk.after = [] |
|
974 | hunk.before = hunk.after = [] | |
976 | buf = util.stringio() |
|
975 | buf = util.stringio() | |
977 | hunk.write(buf) |
|
976 | hunk.write(buf) | |
978 | patchlines = mdiff.splitnewlines(buf.getvalue()) |
|
977 | patchlines = mdiff.splitnewlines(buf.getvalue()) | |
979 | # hunk.prettystr() will update hunk.removed |
|
978 | # hunk.prettystr() will update hunk.removed | |
980 | a2 = a1 + hunk.removed |
|
979 | a2 = a1 + hunk.removed | |
981 | blines = [l[1:] for l in patchlines[1:] if not l.startswith(b'-')] |
|
980 | blines = [l[1:] for l in patchlines[1:] if not l.startswith(b'-')] | |
982 | return path, (a1, a2, blines) |
|
981 | return path, (a1, a2, blines) | |
983 |
|
982 | |||
984 |
|
983 | |||
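
The returned (a1, a2, [bline]) triple is a 0-based, half-open line range in the old file plus its replacement lines, ready for list splicing. A worked example for a hypothetical hunk '@@ -3,2 +3,1 @@' with no context lines:

    # a1 = fromline + len(before) - 1 = 3 + 0 - 1 = 2; a2 = a1 + removed = 4
    a1, a2 = 2, 4
    blines = [b'merged line\n']
    lines = [b'1\n', b'2\n', b'3\n', b'4\n', b'5\n']
    lines[a1:a2] = blines               # the splice overlaydiffcontext performs
    assert lines == [b'1\n', b'2\n', b'merged line\n', b'5\n']
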
985 | def overlaydiffcontext(ctx, chunks): |
|
984 | def overlaydiffcontext(ctx, chunks): | |
986 | """(ctx, [crecord.uihunk]) -> memctx |
|
985 | """(ctx, [crecord.uihunk]) -> memctx | |
987 |
|
986 | |||
988 | return a memctx with some [1] patches (chunks) applied to ctx. |
|
987 | return a memctx with some [1] patches (chunks) applied to ctx. | |
989 | [1]: modifications are handled. renames, mode changes, etc. are ignored. |
|
988 | [1]: modifications are handled. renames, mode changes, etc. are ignored. | |
990 | """ |
|
989 | """ | |
991 | # sadly the applying-patch logic is hardly reusable, and messy: |
|
990 | # sadly the applying-patch logic is hardly reusable, and messy: | |
992 | # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it |
|
991 | # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it | |
993 | # needs a file stream of a patch and will re-parse it, while we have |
|
992 | # needs a file stream of a patch and will re-parse it, while we have | |
994 | # structured hunk objects at hand. |
|
993 | # structured hunk objects at hand. | |
995 | # 2. a lot of different implementations about "chunk" (patch.hunk, |
|
994 | # 2. a lot of different implementations about "chunk" (patch.hunk, | |
996 | # patch.recordhunk, crecord.uihunk) |
|
995 | # patch.recordhunk, crecord.uihunk) | |
997 | # as we only care about applying changes to modified files, no mode |
|
996 | # as we only care about applying changes to modified files, no mode | |
998 | # change, no binary diff, and no renames, it's probably okay to |
|
997 | # change, no binary diff, and no renames, it's probably okay to | |
999 | # re-invent the logic using much simpler code here. |
|
998 | # re-invent the logic using much simpler code here. | |
1000 | memworkingcopy = {} # {path: content} |
|
999 | memworkingcopy = {} # {path: content} | |
1001 | patchmap = collections.defaultdict(lambda: []) # {path: [(a1, a2, [bline])]} 

1000 | patchmap = collections.defaultdict(lambda: []) # {path: [(a1, a2, [bline])]} | 
1002 | for path, info in map(_parsechunk, chunks): |
|
1001 | for path, info in map(_parsechunk, chunks): | |
1003 | if not path or not info: |
|
1002 | if not path or not info: | |
1004 | continue |
|
1003 | continue | |
1005 | patchmap[path].append(info) |
|
1004 | patchmap[path].append(info) | |
1006 | for path, patches in pycompat.iteritems(patchmap): |
|
1005 | for path, patches in pycompat.iteritems(patchmap): | |
1007 | if path not in ctx or not patches: |
|
1006 | if path not in ctx or not patches: | |
1008 | continue |
|
1007 | continue | |
1009 | patches.sort(reverse=True) |
|
1008 | patches.sort(reverse=True) | |
1010 | lines = mdiff.splitnewlines(ctx[path].data()) |
|
1009 | lines = mdiff.splitnewlines(ctx[path].data()) | |
1011 | for a1, a2, blines in patches: |
|
1010 | for a1, a2, blines in patches: | |
1012 | lines[a1:a2] = blines |
|
1011 | lines[a1:a2] = blines | |
1013 | memworkingcopy[path] = b''.join(lines) |
|
1012 | memworkingcopy[path] = b''.join(lines) | |
1014 | return overlaycontext(memworkingcopy, ctx) |
|
1013 | return overlaycontext(memworkingcopy, ctx) | |
1015 |
|
1014 | |||
1016 |
|
1015 | |||
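
The patches.sort(reverse=True) above is what keeps the splicing valid: hunks are applied bottom-up, so a splice never shifts the indices of the hunks still to be applied. In isolation, with two hypothetical hunks:

    lines = [b'a\n', b'b\n', b'c\n', b'd\n']
    patches = [(0, 1, [b'A\n']), (2, 4, [b'CD\n'])]
    patches.sort(reverse=True)          # apply (2, 4, ...) before (0, 1, ...)
    for a1, a2, blines in patches:
        lines[a1:a2] = blines
    assert lines == [b'A\n', b'b\n', b'CD\n']
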
1017 | def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None): |
|
1016 | def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None): | |
1018 | """pick fixup chunks from targetctx, apply them to stack. |
|
1017 | """pick fixup chunks from targetctx, apply them to stack. | |
1019 |
|
1018 | |||
1020 | if targetctx is None, the working copy context will be used. |
|
1019 | if targetctx is None, the working copy context will be used. | |
1021 | if stack is None, the current draft stack will be used. |
|
1020 | if stack is None, the current draft stack will be used. | |
1022 | return fixupstate. |
|
1021 | return fixupstate. | |
1023 | """ |
|
1022 | """ | |
1024 | if stack is None: |
|
1023 | if stack is None: | |
1025 | limit = ui.configint(b'absorb', b'max-stack-size') |
|
1024 | limit = ui.configint(b'absorb', b'max-stack-size') | |
1026 | headctx = repo[b'.'] |
|
1025 | headctx = repo[b'.'] | |
1027 | if len(headctx.parents()) > 1: |
|
1026 | if len(headctx.parents()) > 1: | |
1028 | raise error.InputError(_(b'cannot absorb into a merge')) |
|
1027 | raise error.InputError(_(b'cannot absorb into a merge')) | |
1029 | stack = getdraftstack(headctx, limit) |
|
1028 | stack = getdraftstack(headctx, limit) | |
1030 | if limit and len(stack) >= limit: |
|
1029 | if limit and len(stack) >= limit: | |
1031 | ui.warn( |
|
1030 | ui.warn( | |
1032 | _( |
|
1031 | _( | |
1033 | b'absorb: only the %d most recent changesets will ' 

1032 | b'absorb: only the %d most recent changesets will ' | 
1034 | b'be analysed\n' |
|
1033 | b'be analysed\n' | |
1035 | ) |
|
1034 | ) | |
1036 | % limit |
|
1035 | % limit | |
1037 | ) |
|
1036 | ) | |
1038 | if not stack: |
|
1037 | if not stack: | |
1039 | raise error.InputError(_(b'no mutable changeset to change')) |
|
1038 | raise error.InputError(_(b'no mutable changeset to change')) | |
1040 | if targetctx is None: # default to working copy |
|
1039 | if targetctx is None: # default to working copy | |
1041 | targetctx = repo[None] |
|
1040 | targetctx = repo[None] | |
1042 | if pats is None: |
|
1041 | if pats is None: | |
1043 | pats = () |
|
1042 | pats = () | |
1044 | if opts is None: |
|
1043 | if opts is None: | |
1045 | opts = {} |
|
1044 | opts = {} | |
1046 | state = fixupstate(stack, ui=ui, opts=opts) |
|
1045 | state = fixupstate(stack, ui=ui, opts=opts) | |
1047 | matcher = scmutil.match(targetctx, pats, opts) |
|
1046 | matcher = scmutil.match(targetctx, pats, opts) | |
1048 | if opts.get(b'interactive'): |
|
1047 | if opts.get(b'interactive'): | |
1049 | diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher) |
|
1048 | diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher) | |
1050 | origchunks = patch.parsepatch(diff) |
|
1049 | origchunks = patch.parsepatch(diff) | |
1051 | chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0] |
|
1050 | chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0] | |
1052 | targetctx = overlaydiffcontext(stack[-1], chunks) |
|
1051 | targetctx = overlaydiffcontext(stack[-1], chunks) | |
1053 | fm = None |
|
1052 | fm = None | |
1054 | if opts.get(b'print_changes') or not opts.get(b'apply_changes'): |
|
1053 | if opts.get(b'print_changes') or not opts.get(b'apply_changes'): | |
1055 | fm = ui.formatter(b'absorb', opts) |
|
1054 | fm = ui.formatter(b'absorb', opts) | |
1056 | state.diffwith(targetctx, matcher, fm) |
|
1055 | state.diffwith(targetctx, matcher, fm) | |
1057 | if fm is not None: |
|
1056 | if fm is not None: | |
1058 | fm.startitem() |
|
1057 | fm.startitem() | |
1059 | fm.write( |
|
1058 | fm.write( | |
1060 | b"count", b"\n%d changesets affected\n", len(state.ctxaffected) |
|
1059 | b"count", b"\n%d changesets affected\n", len(state.ctxaffected) | |
1061 | ) |
|
1060 | ) | |
1062 | fm.data(linetype=b'summary') |
|
1061 | fm.data(linetype=b'summary') | |
1063 | for ctx in reversed(stack): |
|
1062 | for ctx in reversed(stack): | |
1064 | if ctx not in state.ctxaffected: |
|
1063 | if ctx not in state.ctxaffected: | |
1065 | continue |
|
1064 | continue | |
1066 | fm.startitem() |
|
1065 | fm.startitem() | |
1067 | fm.context(ctx=ctx) |
|
1066 | fm.context(ctx=ctx) | |
1068 | fm.data(linetype=b'changeset') |
|
1067 | fm.data(linetype=b'changeset') | |
1069 | fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node') |
|
1068 | fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node') | |
1070 | descfirstline = ctx.description().splitlines()[0] |
|
1069 | descfirstline = ctx.description().splitlines()[0] | |
1071 | fm.write( |
|
1070 | fm.write( | |
1072 | b'descfirstline', |
|
1071 | b'descfirstline', | |
1073 | b'%s\n', |
|
1072 | b'%s\n', | |
1074 | descfirstline, |
|
1073 | descfirstline, | |
1075 | label=b'absorb.description', |
|
1074 | label=b'absorb.description', | |
1076 | ) |
|
1075 | ) | |
1077 | fm.end() |
|
1076 | fm.end() | |
1078 | if not opts.get(b'dry_run'): |
|
1077 | if not opts.get(b'dry_run'): | |
1079 | if ( |
|
1078 | if ( | |
1080 | not opts.get(b'apply_changes') |
|
1079 | not opts.get(b'apply_changes') | |
1081 | and state.ctxaffected |
|
1080 | and state.ctxaffected | |
1082 | and ui.promptchoice( |
|
1081 | and ui.promptchoice( | |
1083 | b"apply changes (y/N)? $$ &Yes $$ &No", default=1 |
|
1082 | b"apply changes (y/N)? $$ &Yes $$ &No", default=1 | |
1084 | ) |
|
1083 | ) | |
1085 | ): |
|
1084 | ): | |
1086 | raise error.CanceledError(_(b'absorb cancelled\n')) |
|
1085 | raise error.CanceledError(_(b'absorb cancelled\n')) | |
1087 |
|
1086 | |||
1088 | state.apply() |
|
1087 | state.apply() | |
1089 | if state.commit(): |
|
1088 | if state.commit(): | |
1090 | state.printchunkstats() |
|
1089 | state.printchunkstats() | |
1091 | elif not ui.quiet: |
|
1090 | elif not ui.quiet: | |
1092 | ui.write(_(b'nothing applied\n')) |
|
1091 | ui.write(_(b'nothing applied\n')) | |
1093 | return state |
|
1092 | return state | |
1094 |
|
1093 | |||
1095 |
|
1094 | |||
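
Taken together, a minimal programmatic use of absorb() from another extension might look like the sketch below. Assumptions: ui and repo are supplied by the caller, and the opts keys mirror the command-line flags; this is a sketch, not a documented API.

    opts = {b'apply_changes': True, b'dry_run': False}
    state = absorb(ui, repo, pats=(), opts=opts)
    if sum(applied for applied, total in state.chunkstats.values()) == 0:
        ui.write(b'nothing was absorbed\n')
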
1096 | @command( |
|
1095 | @command( | |
1097 | b'absorb', |
|
1096 | b'absorb', | |
1098 | [ |
|
1097 | [ | |
1099 | ( |
|
1098 | ( | |
1100 | b'a', |
|
1099 | b'a', | |
1101 | b'apply-changes', |
|
1100 | b'apply-changes', | |
1102 | None, |
|
1101 | None, | |
1103 | _(b'apply changes without prompting for confirmation'), |
|
1102 | _(b'apply changes without prompting for confirmation'), | |
1104 | ), |
|
1103 | ), | |
1105 | ( |
|
1104 | ( | |
1106 | b'p', |
|
1105 | b'p', | |
1107 | b'print-changes', |
|
1106 | b'print-changes', | |
1108 | None, |
|
1107 | None, | |
1109 | _(b'always print which changesets are modified by which changes'), |
|
1108 | _(b'always print which changesets are modified by which changes'), | |
1110 | ), |
|
1109 | ), | |
1111 | ( |
|
1110 | ( | |
1112 | b'i', |
|
1111 | b'i', | |
1113 | b'interactive', |
|
1112 | b'interactive', | |
1114 | None, |
|
1113 | None, | |
1115 | _(b'interactively select which chunks to apply'), |
|
1114 | _(b'interactively select which chunks to apply'), | |
1116 | ), |
|
1115 | ), | |
1117 | ( |
|
1116 | ( | |
1118 | b'e', |
|
1117 | b'e', | |
1119 | b'edit-lines', |
|
1118 | b'edit-lines', | |
1120 | None, |
|
1119 | None, | |
1121 | _( |
|
1120 | _( | |
1122 | b'edit what lines belong to which changesets before commit ' |
|
1121 | b'edit what lines belong to which changesets before commit ' | |
1123 | b'(EXPERIMENTAL)' |
|
1122 | b'(EXPERIMENTAL)' | |
1124 | ), |
|
1123 | ), | |
1125 | ), |
|
1124 | ), | |
1126 | ] |
|
1125 | ] | |
1127 | + commands.dryrunopts |
|
1126 | + commands.dryrunopts | |
1128 | + commands.templateopts |
|
1127 | + commands.templateopts | |
1129 | + commands.walkopts, |
|
1128 | + commands.walkopts, | |
1130 | _(b'hg absorb [OPTION] [FILE]...'), |
|
1129 | _(b'hg absorb [OPTION] [FILE]...'), | |
1131 | helpcategory=command.CATEGORY_COMMITTING, |
|
1130 | helpcategory=command.CATEGORY_COMMITTING, | |
1132 | helpbasic=True, |
|
1131 | helpbasic=True, | |
1133 | ) |
|
1132 | ) | |
1134 | def absorbcmd(ui, repo, *pats, **opts): |
|
1133 | def absorbcmd(ui, repo, *pats, **opts): | |
1135 | """incorporate corrections into the stack of draft changesets |
|
1134 | """incorporate corrections into the stack of draft changesets | |
1136 |
|
1135 | |||
1137 | absorb analyzes each change in your working directory and attempts to |
|
1136 | absorb analyzes each change in your working directory and attempts to | |
1138 | amend the changed lines into the changesets in your stack that first |
|
1137 | amend the changed lines into the changesets in your stack that first | |
1139 | introduced those lines. |
|
1138 | introduced those lines. | |
1140 |
|
1139 | |||
1141 | If absorb cannot find an unambiguous changeset to amend for a change, |
|
1140 | If absorb cannot find an unambiguous changeset to amend for a change, | |
1142 | that change will be left in the working directory, untouched. Such changes can be 

1141 | that change will be left in the working directory, untouched. Such changes can be | 
1143 | observed by :hg:`status` or :hg:`diff` afterwards. In other words, |
|
1142 | observed by :hg:`status` or :hg:`diff` afterwards. In other words, | |
1144 | absorb does not write to the working directory. |
|
1143 | absorb does not write to the working directory. | |
1145 |
|
1144 | |||
1146 | Changesets outside the revset `::. and not public() and not merge()` will |
|
1145 | Changesets outside the revset `::. and not public() and not merge()` will | |
1147 | not be changed. |
|
1146 | not be changed. | |
1148 |
|
1147 | |||
1149 | Changesets that become empty after applying the changes will be deleted. |
|
1148 | Changesets that become empty after applying the changes will be deleted. | |
1150 |
|
1149 | |||
1151 | By default, absorb will show what it plans to do and prompt for |
|
1150 | By default, absorb will show what it plans to do and prompt for | |
1152 | confirmation. If you are confident that the changes will be absorbed |
|
1151 | confirmation. If you are confident that the changes will be absorbed | |
1153 | to the correct place, run :hg:`absorb -a` to apply the changes |
|
1152 | to the correct place, run :hg:`absorb -a` to apply the changes | |
1154 | immediately. |
|
1153 | immediately. | |
1155 |
|
1154 | |||
1156 | Returns 0 on success, 1 if all chunks were ignored and nothing amended. |
|
1155 | Returns 0 on success, 1 if all chunks were ignored and nothing amended. | |
1157 | """ |
|
1156 | """ | |
1158 | opts = pycompat.byteskwargs(opts) |
|
1157 | opts = pycompat.byteskwargs(opts) | |
1159 |
|
1158 | |||
1160 | with repo.wlock(), repo.lock(): |
|
1159 | with repo.wlock(), repo.lock(): | |
1161 | if not opts[b'dry_run']: |
|
1160 | if not opts[b'dry_run']: | |
1162 | cmdutil.checkunfinished(repo) |
|
1161 | cmdutil.checkunfinished(repo) | |
1163 |
|
1162 | |||
1164 | state = absorb(ui, repo, pats=pats, opts=opts) |
|
1163 | state = absorb(ui, repo, pats=pats, opts=opts) | |
1165 | if sum(s[0] for s in state.chunkstats.values()) == 0: |
|
1164 | if sum(s[0] for s in state.chunkstats.values()) == 0: | |
1166 | return 1 |
|
1165 | return 1 |
@@ -1,531 +1,531 b'' | |||||
1 | # git.py - git support for the convert extension |
|
1 | # git.py - git support for the convert extension | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import os |
|
9 | import os | |
10 |
|
10 | |||
11 | from mercurial.i18n import _ |
|
11 | from mercurial.i18n import _ | |
12 | from mercurial.node import nullhex 
|
12 | from mercurial.node import sha1nodeconstants | |
13 | from mercurial import ( |
|
13 | from mercurial import ( | |
14 | config, |
|
14 | config, | |
15 | error, |
|
15 | error, | |
16 | pycompat, |
|
16 | pycompat, | |
17 | ) |
|
17 | ) | |
18 |
|
18 | |||
19 | from . import common |
|
19 | from . import common | |
20 |
|
20 | |||
21 |
|
21 | |||
22 | class submodule(object): |
|
22 | class submodule(object): | |
23 | def __init__(self, path, node, url): |
|
23 | def __init__(self, path, node, url): | |
24 | self.path = path |
|
24 | self.path = path | |
25 | self.node = node |
|
25 | self.node = node | |
26 | self.url = url |
|
26 | self.url = url | |
27 |
|
27 | |||
28 | def hgsub(self): |
|
28 | def hgsub(self): | |
29 | return b"%s = [git]%s" % (self.path, self.url) |
|
29 | return b"%s = [git]%s" % (self.path, self.url) | |
30 |
|
30 | |||
31 | def hgsubstate(self): |
|
31 | def hgsubstate(self): | |
32 | return b"%s %s" % (self.node, self.path) |
|
32 | return b"%s %s" % (self.node, self.path) | |
33 |
|
33 | |||
34 |
|
34 | |||
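
Concretely, using the submodule class above, one entry renders into Mercurial's two bookkeeping files like this (hypothetical path, node, and URL):

    sub = submodule(b'libs/foo', b'a' * 40, b'git://example.com/foo.git')
    assert sub.hgsub() == b'libs/foo = [git]git://example.com/foo.git'
    assert sub.hgsubstate() == b'%s libs/foo' % (b'a' * 40)
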
35 | # Keys in extra fields that should not be copied if the user requests. |
|
35 | # Keys in extra fields that should not be copied if the user requests. | |
36 | bannedextrakeys = { |
|
36 | bannedextrakeys = { | |
37 | # Git commit object built-ins. |
|
37 | # Git commit object built-ins. | |
38 | b'tree', |
|
38 | b'tree', | |
39 | b'parent', |
|
39 | b'parent', | |
40 | b'author', |
|
40 | b'author', | |
41 | b'committer', |
|
41 | b'committer', | |
42 | # Mercurial built-ins. |
|
42 | # Mercurial built-ins. | |
43 | b'branch', |
|
43 | b'branch', | |
44 | b'close', |
|
44 | b'close', | |
45 | } |
|
45 | } | |
46 |
|
46 | |||
47 |
|
47 | |||
48 | class convert_git(common.converter_source, common.commandline): |
|
48 | class convert_git(common.converter_source, common.commandline): | |
49 | # Windows does not support the GIT_DIR= construct while other systems 

49 | # Windows does not support the GIT_DIR= construct while other systems | 
50 | # cannot remove an environment variable. Just assume none have 

50 | # cannot remove an environment variable. Just assume none have | 
51 | # both issues. |
|
51 | # both issues. | |
52 |
|
52 | |||
53 | def _gitcmd(self, cmd, *args, **kwargs): |
|
53 | def _gitcmd(self, cmd, *args, **kwargs): | |
54 | return cmd(b'--git-dir=%s' % self.path, *args, **kwargs) |
|
54 | return cmd(b'--git-dir=%s' % self.path, *args, **kwargs) | |
55 |
|
55 | |||
56 | def gitrun0(self, *args, **kwargs): |
|
56 | def gitrun0(self, *args, **kwargs): | |
57 | return self._gitcmd(self.run0, *args, **kwargs) |
|
57 | return self._gitcmd(self.run0, *args, **kwargs) | |
58 |
|
58 | |||
59 | def gitrun(self, *args, **kwargs): |
|
59 | def gitrun(self, *args, **kwargs): | |
60 | return self._gitcmd(self.run, *args, **kwargs) |
|
60 | return self._gitcmd(self.run, *args, **kwargs) | |
61 |
|
61 | |||
62 | def gitrunlines0(self, *args, **kwargs): |
|
62 | def gitrunlines0(self, *args, **kwargs): | |
63 | return self._gitcmd(self.runlines0, *args, **kwargs) |
|
63 | return self._gitcmd(self.runlines0, *args, **kwargs) | |
64 |
|
64 | |||
65 | def gitrunlines(self, *args, **kwargs): |
|
65 | def gitrunlines(self, *args, **kwargs): | |
66 | return self._gitcmd(self.runlines, *args, **kwargs) |
|
66 | return self._gitcmd(self.runlines, *args, **kwargs) | |
67 |
|
67 | |||
68 | def gitpipe(self, *args, **kwargs): |
|
68 | def gitpipe(self, *args, **kwargs): | |
69 | return self._gitcmd(self._run3, *args, **kwargs) |
|
69 | return self._gitcmd(self._run3, *args, **kwargs) | |
70 |
|
70 | |||
71 | def __init__(self, ui, repotype, path, revs=None): |
|
71 | def __init__(self, ui, repotype, path, revs=None): | |
72 | super(convert_git, self).__init__(ui, repotype, path, revs=revs) |
|
72 | super(convert_git, self).__init__(ui, repotype, path, revs=revs) | |
73 | common.commandline.__init__(self, ui, b'git') |
|
73 | common.commandline.__init__(self, ui, b'git') | |
74 |
|
74 | |||
75 | # Pass an absolute path to git to prevent it from ever being interpreted 

75 | # Pass an absolute path to git to prevent it from ever being interpreted | 
76 | # as a URL |
|
76 | # as a URL | |
77 | path = os.path.abspath(path) |
|
77 | path = os.path.abspath(path) | |
78 |
|
78 | |||
79 | if os.path.isdir(path + b"/.git"): |
|
79 | if os.path.isdir(path + b"/.git"): | |
80 | path += b"/.git" |
|
80 | path += b"/.git" | |
81 | if not os.path.exists(path + b"/objects"): |
|
81 | if not os.path.exists(path + b"/objects"): | |
82 | raise common.NoRepo( |
|
82 | raise common.NoRepo( | |
83 | _(b"%s does not look like a Git repository") % path |
|
83 | _(b"%s does not look like a Git repository") % path | |
84 | ) |
|
84 | ) | |
85 |
|
85 | |||
86 | # The default value (50) is based on the default for 'git diff'. |
|
86 | # The default value (50) is based on the default for 'git diff'. | |
87 | similarity = ui.configint(b'convert', b'git.similarity') |
|
87 | similarity = ui.configint(b'convert', b'git.similarity') | |
88 | if similarity < 0 or similarity > 100: |
|
88 | if similarity < 0 or similarity > 100: | |
89 | raise error.Abort(_(b'similarity must be between 0 and 100')) |
|
89 | raise error.Abort(_(b'similarity must be between 0 and 100')) | |
90 | if similarity > 0: |
|
90 | if similarity > 0: | |
91 | self.simopt = [b'-C%d%%' % similarity] |
|
91 | self.simopt = [b'-C%d%%' % similarity] | |
92 | findcopiesharder = ui.configbool( |
|
92 | findcopiesharder = ui.configbool( | |
93 | b'convert', b'git.findcopiesharder' |
|
93 | b'convert', b'git.findcopiesharder' | |
94 | ) |
|
94 | ) | |
95 | if findcopiesharder: |
|
95 | if findcopiesharder: | |
96 | self.simopt.append(b'--find-copies-harder') |
|
96 | self.simopt.append(b'--find-copies-harder') | |
97 |
|
97 | |||
98 | renamelimit = ui.configint(b'convert', b'git.renamelimit') |
|
98 | renamelimit = ui.configint(b'convert', b'git.renamelimit') | |
99 | self.simopt.append(b'-l%d' % renamelimit) |
|
99 | self.simopt.append(b'-l%d' % renamelimit) | |
100 | else: |
|
100 | else: | |
101 | self.simopt = [] |
|
101 | self.simopt = [] | |
102 |
|
102 | |||
103 | common.checktool(b'git', b'git') |
|
103 | common.checktool(b'git', b'git') | |
104 |
|
104 | |||
105 | self.path = path |
|
105 | self.path = path | |
106 | self.submodules = [] |
|
106 | self.submodules = [] | |
107 |
|
107 | |||
108 | self.catfilepipe = self.gitpipe(b'cat-file', b'--batch') |
|
108 | self.catfilepipe = self.gitpipe(b'cat-file', b'--batch') | |
109 |
|
109 | |||
110 | self.copyextrakeys = self.ui.configlist(b'convert', b'git.extrakeys') |
|
110 | self.copyextrakeys = self.ui.configlist(b'convert', b'git.extrakeys') | |
111 | banned = set(self.copyextrakeys) & bannedextrakeys |
|
111 | banned = set(self.copyextrakeys) & bannedextrakeys | |
112 | if banned: |
|
112 | if banned: | |
113 | raise error.Abort( |
|
113 | raise error.Abort( | |
114 | _(b'copying of extra key is forbidden: %s') |
|
114 | _(b'copying of extra key is forbidden: %s') | |
115 | % _(b', ').join(sorted(banned)) |
|
115 | % _(b', ').join(sorted(banned)) | |
116 | ) |
|
116 | ) | |
117 |
|
117 | |||
118 | committeractions = self.ui.configlist( |
|
118 | committeractions = self.ui.configlist( | |
119 | b'convert', b'git.committeractions' |
|
119 | b'convert', b'git.committeractions' | |
120 | ) |
|
120 | ) | |
121 |
|
121 | |||
122 | messagedifferent = None |
|
122 | messagedifferent = None | |
123 | messagealways = None |
|
123 | messagealways = None | |
124 | for a in committeractions: |
|
124 | for a in committeractions: | |
125 | if a.startswith((b'messagedifferent', b'messagealways')): |
|
125 | if a.startswith((b'messagedifferent', b'messagealways')): | |
126 | k = a |
|
126 | k = a | |
127 | v = None |
|
127 | v = None | |
128 | if b'=' in a: |
|
128 | if b'=' in a: | |
129 | k, v = a.split(b'=', 1) |
|
129 | k, v = a.split(b'=', 1) | |
130 |
|
130 | |||
131 | if k == b'messagedifferent': |
|
131 | if k == b'messagedifferent': | |
132 | messagedifferent = v or b'committer:' |
|
132 | messagedifferent = v or b'committer:' | |
133 | elif k == b'messagealways': |
|
133 | elif k == b'messagealways': | |
134 | messagealways = v or b'committer:' |
|
134 | messagealways = v or b'committer:' | |
135 |
|
135 | |||
136 | if messagedifferent and messagealways: |
|
136 | if messagedifferent and messagealways: | |
137 | raise error.Abort( |
|
137 | raise error.Abort( | |
138 | _( |
|
138 | _( | |
139 | b'committeractions cannot define both ' |
|
139 | b'committeractions cannot define both ' | |
140 | b'messagedifferent and messagealways' |
|
140 | b'messagedifferent and messagealways' | |
141 | ) |
|
141 | ) | |
142 | ) |
|
142 | ) | |
143 |
|
143 | |||
144 | dropcommitter = b'dropcommitter' in committeractions |
|
144 | dropcommitter = b'dropcommitter' in committeractions | |
145 | replaceauthor = b'replaceauthor' in committeractions |
|
145 | replaceauthor = b'replaceauthor' in committeractions | |
146 |
|
146 | |||
147 | if dropcommitter and replaceauthor: |
|
147 | if dropcommitter and replaceauthor: | |
148 | raise error.Abort( |
|
148 | raise error.Abort( | |
149 | _( |
|
149 | _( | |
150 | b'committeractions cannot define both ' |
|
150 | b'committeractions cannot define both ' | |
151 | b'dropcommitter and replaceauthor' |
|
151 | b'dropcommitter and replaceauthor' | |
152 | ) |
|
152 | ) | |
153 | ) |
|
153 | ) | |
154 |
|
154 | |||
155 | if dropcommitter and messagealways: |
|
155 | if dropcommitter and messagealways: | |
156 | raise error.Abort( |
|
156 | raise error.Abort( | |
157 | _( |
|
157 | _( | |
158 | b'committeractions cannot define both ' |
|
158 | b'committeractions cannot define both ' | |
159 | b'dropcommitter and messagealways' |
|
159 | b'dropcommitter and messagealways' | |
160 | ) |
|
160 | ) | |
161 | ) |
|
161 | ) | |
162 |
|
162 | |||
163 | if not messagedifferent and not messagealways: |
|
163 | if not messagedifferent and not messagealways: | |
164 | messagedifferent = b'committer:' |
|
164 | messagedifferent = b'committer:' | |
165 |
|
165 | |||
166 | self.committeractions = { |
|
166 | self.committeractions = { | |
167 | b'dropcommitter': dropcommitter, |
|
167 | b'dropcommitter': dropcommitter, | |
168 | b'replaceauthor': replaceauthor, |
|
168 | b'replaceauthor': replaceauthor, | |
169 | b'messagedifferent': messagedifferent, |
|
169 | b'messagedifferent': messagedifferent, | |
170 | b'messagealways': messagealways, |
|
170 | b'messagealways': messagealways, | |
171 | } |
|
171 | } | |
172 |
|
172 | |||
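
To illustrate the committeractions parsing above: a hypothetical setting convert.git.committeractions=messagedifferent=Committed-by: splits into a key and a custom message prefix like so:

    a = b'messagedifferent=Committed-by:'
    k, v = a.split(b'=', 1) if b'=' in a else (a, None)
    assert k == b'messagedifferent' and v == b'Committed-by:'
    prefix = v or b'committer:'         # fall back to the default prefix
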
173 | def after(self): |
|
173 | def after(self): | |
174 | for f in self.catfilepipe: |
|
174 | for f in self.catfilepipe: | |
175 | f.close() |
|
175 | f.close() | |
176 |
|
176 | |||
177 | def getheads(self): |
|
177 | def getheads(self): | |
178 | if not self.revs: |
|
178 | if not self.revs: | |
179 | output, status = self.gitrun( |
|
179 | output, status = self.gitrun( | |
180 | b'rev-parse', b'--branches', b'--remotes' |
|
180 | b'rev-parse', b'--branches', b'--remotes' | |
181 | ) |
|
181 | ) | |
182 | heads = output.splitlines() |
|
182 | heads = output.splitlines() | |
183 | if status: |
|
183 | if status: | |
184 | raise error.Abort(_(b'cannot retrieve git heads')) |
|
184 | raise error.Abort(_(b'cannot retrieve git heads')) | |
185 | else: |
|
185 | else: | |
186 | heads = [] |
|
186 | heads = [] | |
187 | for rev in self.revs: |
|
187 | for rev in self.revs: | |
188 | rawhead, ret = self.gitrun(b'rev-parse', b'--verify', rev) |
|
188 | rawhead, ret = self.gitrun(b'rev-parse', b'--verify', rev) | |
189 | heads.append(rawhead[:-1]) |
|
189 | heads.append(rawhead[:-1]) | |
190 | if ret: |
|
190 | if ret: | |
191 | raise error.Abort(_(b'cannot retrieve git head "%s"') % rev) |
|
191 | raise error.Abort(_(b'cannot retrieve git head "%s"') % rev) | |
192 | return heads |
|
192 | return heads | |
193 |
|
193 | |||
194 | def catfile(self, rev, ftype): |
|
194 | def catfile(self, rev, ftype): | |
195 | if rev == nullhex: |
|
195 | if rev == sha1nodeconstants.nullhex: | |
196 | raise IOError |
|
196 | raise IOError | |
197 | self.catfilepipe[0].write(rev + b'\n') |
|
197 | self.catfilepipe[0].write(rev + b'\n') | |
198 | self.catfilepipe[0].flush() |
|
198 | self.catfilepipe[0].flush() | |
199 | info = self.catfilepipe[1].readline().split() |
|
199 | info = self.catfilepipe[1].readline().split() | |
200 | if info[1] != ftype: |
|
200 | if info[1] != ftype: | |
201 | raise error.Abort( |
|
201 | raise error.Abort( | |
202 | _(b'cannot read %r object at %s') |
|
202 | _(b'cannot read %r object at %s') | |
203 | % (pycompat.bytestr(ftype), rev) |
|
203 | % (pycompat.bytestr(ftype), rev) | |
204 | ) |
|
204 | ) | |
205 | size = int(info[2]) |
|
205 | size = int(info[2]) | |
206 | data = self.catfilepipe[1].read(size) |
|
206 | data = self.catfilepipe[1].read(size) | |
207 | if len(data) < size: |
|
207 | if len(data) < size: | |
208 | raise error.Abort( |
|
208 | raise error.Abort( | |
209 | _(b'cannot read %r object at %s: unexpected size') |
|
209 | _(b'cannot read %r object at %s: unexpected size') | |
210 | % (ftype, rev) |
|
210 | % (ftype, rev) | |
211 | ) |
|
211 | ) | |
212 | # read the trailing newline |
|
212 | # read the trailing newline | |
213 | self.catfilepipe[1].read(1) |
|
213 | self.catfilepipe[1].read(1) | |
214 | return data |
|
214 | return data | |
215 |
|
215 | |||
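
catfile drives a long-lived `git cat-file --batch` child process: it writes one revision per request and reads back a '<sha> <type> <size>' header, the object body, and a trailing newline. A standalone sketch of parsing one canned response (hypothetical SHA-1 and content):

    response = (
        b'99bf6db3f8e49b3f40ca0be0e7cbaa6ac0d3e413 blob 12\n'
        b'hello world\n'
        b'\n'
    )
    header, rest = response.split(b'\n', 1)
    sha, ftype, size = header.split()
    data = rest[: int(size)]            # exactly 'size' bytes of content
    assert ftype == b'blob' and data == b'hello world\n'
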
216 | def getfile(self, name, rev): |
|
216 | def getfile(self, name, rev): | |
217 | if rev == nullhex: |
|
217 | if rev == sha1nodeconstants.nullhex: | |
218 | return None, None |
|
218 | return None, None | |
219 | if name == b'.hgsub': |
|
219 | if name == b'.hgsub': | |
220 | data = b'\n'.join([m.hgsub() for m in self.submoditer()]) |
|
220 | data = b'\n'.join([m.hgsub() for m in self.submoditer()]) | |
221 | mode = b'' |
|
221 | mode = b'' | |
222 | elif name == b'.hgsubstate': |
|
222 | elif name == b'.hgsubstate': | |
223 | data = b'\n'.join([m.hgsubstate() for m in self.submoditer()]) |
|
223 | data = b'\n'.join([m.hgsubstate() for m in self.submoditer()]) | |
224 | mode = b'' |
|
224 | mode = b'' | |
225 | else: |
|
225 | else: | |
226 | data = self.catfile(rev, b"blob") |
|
226 | data = self.catfile(rev, b"blob") | |
227 | mode = self.modecache[(name, rev)] |
|
227 | mode = self.modecache[(name, rev)] | |
228 | return data, mode |
|
228 | return data, mode | |
229 |
|
229 | |||
230 | def submoditer(self): |
|
230 | def submoditer(self): | |
231 | null = nullhex |
|
231 | null = sha1nodeconstants.nullhex | |
232 | for m in sorted(self.submodules, key=lambda p: p.path): |
|
232 | for m in sorted(self.submodules, key=lambda p: p.path): | |
233 | if m.node != null: |
|
233 | if m.node != null: | |
234 | yield m |
|
234 | yield m | |
235 |
|
235 | |||
236 | def parsegitmodules(self, content): |
|
236 | def parsegitmodules(self, content): | |
237 | """Parse the formatted .gitmodules file, example file format: |
|
237 | """Parse the formatted .gitmodules file, example file format: | |
238 | [submodule "sub"]\n |
|
238 | [submodule "sub"]\n | |
239 | \tpath = sub\n |
|
239 | \tpath = sub\n | |
240 | \turl = git://giturl\n |
|
240 | \turl = git://giturl\n | |
241 | """ |
|
241 | """ | |
242 | self.submodules = [] |
|
242 | self.submodules = [] | |
243 | c = config.config() |
|
243 | c = config.config() | |
244 | # Each item in .gitmodules starts with whitespace that can't be parsed 

244 | # Each item in .gitmodules starts with whitespace that can't be parsed | 
245 | c.parse( |
|
245 | c.parse( | |
246 | b'.gitmodules', |
|
246 | b'.gitmodules', | |
247 | b'\n'.join(line.strip() for line in content.split(b'\n')), |
|
247 | b'\n'.join(line.strip() for line in content.split(b'\n')), | |
248 | ) |
|
248 | ) | |
249 | for sec in c.sections(): |
|
249 | for sec in c.sections(): | |
250 | # turn the config object into a real dict |
|
250 | # turn the config object into a real dict | |
251 | s = dict(c.items(sec)) |
|
251 | s = dict(c.items(sec)) | |
252 | if b'url' in s and b'path' in s: |
|
252 | if b'url' in s and b'path' in s: | |
253 | self.submodules.append(submodule(s[b'path'], b'', s[b'url'])) |
|
253 | self.submodules.append(submodule(s[b'path'], b'', s[b'url'])) | |
254 |
|
254 | |||
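
A sketch of the strip-and-parse step on the docstring's example input (plain string handling only; the real code hands the cleaned text to Mercurial's config parser):

    content = b'[submodule "sub"]\n\tpath = sub\n\turl = git://giturl\n'
    cleaned = b'\n'.join(line.strip() for line in content.split(b'\n'))
    # the leading tabs are gone, so a config parser accepts each item
    assert cleaned == b'[submodule "sub"]\npath = sub\nurl = git://giturl\n'
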
255 | def retrievegitmodules(self, version): |
|
255 | def retrievegitmodules(self, version): | |
256 | modules, ret = self.gitrun( |
|
256 | modules, ret = self.gitrun( | |
257 | b'show', b'%s:%s' % (version, b'.gitmodules') |
|
257 | b'show', b'%s:%s' % (version, b'.gitmodules') | |
258 | ) |
|
258 | ) | |
259 | if ret: |
|
259 | if ret: | |
260 | # This can happen if a file is in the repo that has permissions |
|
260 | # This can happen if a file is in the repo that has permissions | |
261 | # 160000, but there is no .gitmodules file. |
|
261 | # 160000, but there is no .gitmodules file. | |
262 | self.ui.warn( |
|
262 | self.ui.warn( | |
263 | _(b"warning: cannot read submodules config file in %s\n") |
|
263 | _(b"warning: cannot read submodules config file in %s\n") | |
264 | % version |
|
264 | % version | |
265 | ) |
|
265 | ) | |
266 | return |
|
266 | return | |
267 |
|
267 | |||
268 | try: |
|
268 | try: | |
269 | self.parsegitmodules(modules) |
|
269 | self.parsegitmodules(modules) | |
270 | except error.ParseError: |
|
270 | except error.ParseError: | |
271 | self.ui.warn( |
|
271 | self.ui.warn( | |
272 | _(b"warning: unable to parse .gitmodules in %s\n") % version |
|
272 | _(b"warning: unable to parse .gitmodules in %s\n") % version | |
273 | ) |
|
273 | ) | |
274 | return |
|
274 | return | |
275 |
|
275 | |||
276 | for m in self.submodules: |
|
276 | for m in self.submodules: | |
277 | node, ret = self.gitrun(b'rev-parse', b'%s:%s' % (version, m.path)) |
|
277 | node, ret = self.gitrun(b'rev-parse', b'%s:%s' % (version, m.path)) | |
278 | if ret: |
|
278 | if ret: | |
279 | continue |
|
279 | continue | |
280 | m.node = node.strip() |
|
280 | m.node = node.strip() | |
281 |
|
281 | |||
282 | def getchanges(self, version, full): |
|
282 | def getchanges(self, version, full): | |
283 | if full: |
|
283 | if full: | |
284 | raise error.Abort(_(b"convert from git does not support --full")) |
|
284 | raise error.Abort(_(b"convert from git does not support --full")) | |
285 | self.modecache = {} |
|
285 | self.modecache = {} | |
286 | cmd = ( |
|
286 | cmd = ( | |
287 | [b'diff-tree', b'-z', b'--root', b'-m', b'-r'] |
|
287 | [b'diff-tree', b'-z', b'--root', b'-m', b'-r'] | |
288 | + self.simopt |
|
288 | + self.simopt | |
289 | + [version] |
|
289 | + [version] | |
290 | ) |
|
290 | ) | |
291 | output, status = self.gitrun(*cmd) |
|
291 | output, status = self.gitrun(*cmd) | |
292 | if status: |
|
292 | if status: | |
293 | raise error.Abort(_(b'cannot read changes in %s') % version) |
|
293 | raise error.Abort(_(b'cannot read changes in %s') % version) | |
294 | changes = [] |
|
294 | changes = [] | |
295 | copies = {} |
|
295 | copies = {} | |
296 | seen = set() |
|
296 | seen = set() | |
297 | entry = None |
|
297 | entry = None | |
298 | subexists = [False] |
|
298 | subexists = [False] | |
299 | subdeleted = [False] |
|
299 | subdeleted = [False] | |
300 | difftree = output.split(b'\x00') |
|
300 | difftree = output.split(b'\x00') | |
301 | lcount = len(difftree) |
|
301 | lcount = len(difftree) | |
302 | i = 0 |
|
302 | i = 0 | |
303 |
|
303 | |||
304 | skipsubmodules = self.ui.configbool(b'convert', b'git.skipsubmodules') |
|
304 | skipsubmodules = self.ui.configbool(b'convert', b'git.skipsubmodules') | |
305 |
|
305 | |||
306 | def add(entry, f, isdest): |
|
306 | def add(entry, f, isdest): | |
307 | seen.add(f) |
|
307 | seen.add(f) | |
308 | h = entry[3] |
|
308 | h = entry[3] | |
309 | p = entry[1] == b"100755" |
|
309 | p = entry[1] == b"100755" | |
310 | s = entry[1] == b"120000" |
|
310 | s = entry[1] == b"120000" | |
311 | renamesource = not isdest and entry[4][0] == b'R' |
|
311 | renamesource = not isdest and entry[4][0] == b'R' | |
312 |
|
312 | |||
313 | if f == b'.gitmodules': |
|
313 | if f == b'.gitmodules': | |
314 | if skipsubmodules: |
|
314 | if skipsubmodules: | |
315 | return |
|
315 | return | |
316 |
|
316 | |||
317 | subexists[0] = True |
|
317 | subexists[0] = True | |
318 | if entry[4] == b'D' or renamesource: |
|
318 | if entry[4] == b'D' or renamesource: | |
319 | subdeleted[0] = True |
|
319 | subdeleted[0] = True | |
320 | changes.append((b'.hgsub', nullhex)) |
|
320 | changes.append((b'.hgsub', sha1nodeconstants.nullhex)) | |
321 | else: |
|
321 | else: | |
322 | changes.append((b'.hgsub', b'')) |
|
322 | changes.append((b'.hgsub', b'')) | |
323 | elif entry[1] == b'160000' or entry[0] == b':160000': |
|
323 | elif entry[1] == b'160000' or entry[0] == b':160000': | |
324 | if not skipsubmodules: |
|
324 | if not skipsubmodules: | |
325 | subexists[0] = True |
|
325 | subexists[0] = True | |
326 | else: |
|
326 | else: | |
327 | if renamesource: |
|
327 | if renamesource: | |
328 | h = nullhex |
|
328 | h = sha1nodeconstants.nullhex | |
329 | self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b"" |
|
329 | self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b"" | |
330 | changes.append((f, h)) |
|
330 | changes.append((f, h)) | |
331 |
|
331 | |||
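
Each entry consumed below is one raw record from `git diff-tree -z`, shaped ':srcmode dstmode srcsha dstsha status', with the path(s) following as NUL-separated fields; index 1 is the destination mode, 3 the destination SHA-1, and 4 the status letter (possibly carrying a rename/copy score, hence the entry[4][0] checks). A sketch of the split with hypothetical SHAs:

    l = b':100644 100755 ' + b'1' * 40 + b' ' + b'2' * 40 + b' M'
    entry = tuple(p for p in l.split())
    assert entry[1] == b'100755' and entry[4] == b'M'
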
332 | while i < lcount: |
|
332 | while i < lcount: | |
333 | l = difftree[i] |
|
333 | l = difftree[i] | |
334 | i += 1 |
|
334 | i += 1 | |
335 | if not entry: |
|
335 | if not entry: | |
336 | if not l.startswith(b':'): |
|
336 | if not l.startswith(b':'): | |
337 | continue |
|
337 | continue | |
338 | entry = tuple(pycompat.bytestr(p) for p in l.split()) |
|
338 | entry = tuple(pycompat.bytestr(p) for p in l.split()) | |
339 | continue |
|
339 | continue | |
340 | f = l |
|
340 | f = l | |
341 | if entry[4][0] == b'C': |
|
341 | if entry[4][0] == b'C': | |
342 | copysrc = f |
|
342 | copysrc = f | |
343 | copydest = difftree[i] |
|
343 | copydest = difftree[i] | |
344 | i += 1 |
|
344 | i += 1 | |
345 | f = copydest |
|
345 | f = copydest | |
346 | copies[copydest] = copysrc |
|
346 | copies[copydest] = copysrc | |
347 | if f not in seen: |
|
347 | if f not in seen: | |
348 | add(entry, f, False) |
|
348 | add(entry, f, False) | |
349 | # A file can be copied multiple times, or modified and copied |
|
349 | # A file can be copied multiple times, or modified and copied | |
350 | # simultaneously. So f can be repeated even if fdest isn't. |
|
350 | # simultaneously. So f can be repeated even if fdest isn't. | |
351 | if entry[4][0] == b'R': |
|
351 | if entry[4][0] == b'R': | |
352 | # rename: next line is the destination |
|
352 | # rename: next line is the destination | |
353 | fdest = difftree[i] |
|
353 | fdest = difftree[i] | |
354 | i += 1 |
|
354 | i += 1 | |
355 | if fdest not in seen: |
|
355 | if fdest not in seen: | |
356 | add(entry, fdest, True) |
|
356 | add(entry, fdest, True) | |
357 | # .gitmodules isn't imported at all, so it being copied to |
|
357 | # .gitmodules isn't imported at all, so it being copied to | |
358 | # and fro doesn't really make sense |
|
358 | # and fro doesn't really make sense | |
359 | if f != b'.gitmodules' and fdest != b'.gitmodules': |
|
359 | if f != b'.gitmodules' and fdest != b'.gitmodules': | |
360 | copies[fdest] = f |
|
360 | copies[fdest] = f | |
361 | entry = None |
|
361 | entry = None | |
362 |
|
362 | |||
363 | if subexists[0]: |
|
363 | if subexists[0]: | |
364 | if subdeleted[0]: |
|
364 | if subdeleted[0]: | |
365 | changes.append((b'.hgsubstate', nullhex)) |
|
365 | changes.append((b'.hgsubstate', sha1nodeconstants.nullhex)) | |
366 | else: |
|
366 | else: | |
367 | self.retrievegitmodules(version) |
|
367 | self.retrievegitmodules(version) | |
368 | changes.append((b'.hgsubstate', b'')) |
|
368 | changes.append((b'.hgsubstate', b'')) | |
369 | return (changes, copies, set()) |
|
369 | return (changes, copies, set()) | |
370 |
|
370 | |||
371 | def getcommit(self, version): |
|
371 | def getcommit(self, version): | |
372 | c = self.catfile(version, b"commit") # read the commit object 

372 | c = self.catfile(version, b"commit") # read the commit object | 
373 | end = c.find(b"\n\n") |
|
373 | end = c.find(b"\n\n") | |
374 | message = c[end + 2 :] |
|
374 | message = c[end + 2 :] | |
375 | message = self.recode(message) |
|
375 | message = self.recode(message) | |
376 | l = c[:end].splitlines() |
|
376 | l = c[:end].splitlines() | |
377 | parents = [] |
|
377 | parents = [] | |
378 | author = committer = None |
|
378 | author = committer = None | |
379 | extra = {} |
|
379 | extra = {} | |
380 | for e in l[1:]: |
|
380 | for e in l[1:]: | |
381 | n, v = e.split(b" ", 1) |
|
381 | n, v = e.split(b" ", 1) | |
382 | if n == b"author": |
|
382 | if n == b"author": | |
383 | p = v.split() |
|
383 | p = v.split() | |
384 | tm, tz = p[-2:] |
|
384 | tm, tz = p[-2:] | |
385 | author = b" ".join(p[:-2]) |
|
385 | author = b" ".join(p[:-2]) | |
386 | if author[0] == b"<": |
|
386 | if author[0] == b"<": | |
387 | author = author[1:-1] |
|
387 | author = author[1:-1] | |
388 | author = self.recode(author) |
|
388 | author = self.recode(author) | |
389 | if n == b"committer": |
|
389 | if n == b"committer": | |
390 | p = v.split() |
|
390 | p = v.split() | |
391 | tm, tz = p[-2:] |
|
391 | tm, tz = p[-2:] | |
392 | committer = b" ".join(p[:-2]) |
|
392 | committer = b" ".join(p[:-2]) | |
393 | if committer[0] == b"<": |
|
393 | if committer[0] == b"<": | |
394 | committer = committer[1:-1] |
|
394 | committer = committer[1:-1] | |
395 | committer = self.recode(committer) |
|
395 | committer = self.recode(committer) | |
396 | if n == b"parent": |
|
396 | if n == b"parent": | |
397 | parents.append(v) |
|
397 | parents.append(v) | |
398 | if n in self.copyextrakeys: |
|
398 | if n in self.copyextrakeys: | |
399 | extra[n] = v |
|
399 | extra[n] = v | |
400 |
|
400 | |||
401 | if self.committeractions[b'dropcommitter']: |
|
401 | if self.committeractions[b'dropcommitter']: | |
402 | committer = None |
|
402 | committer = None | |
403 | elif self.committeractions[b'replaceauthor']: |
|
403 | elif self.committeractions[b'replaceauthor']: | |
404 | author = committer |
|
404 | author = committer | |
405 |
|
405 | |||
406 | if committer: |
|
406 | if committer: | |
407 | messagealways = self.committeractions[b'messagealways'] |
|
407 | messagealways = self.committeractions[b'messagealways'] | |
408 | messagedifferent = self.committeractions[b'messagedifferent'] |
|
408 | messagedifferent = self.committeractions[b'messagedifferent'] | |
409 | if messagealways: |
|
409 | if messagealways: | |
410 | message += b'\n%s %s\n' % (messagealways, committer) |
|
410 | message += b'\n%s %s\n' % (messagealways, committer) | |
411 | elif messagedifferent and author != committer: |
|
411 | elif messagedifferent and author != committer: | |
412 | message += b'\n%s %s\n' % (messagedifferent, committer) |
|
412 | message += b'\n%s %s\n' % (messagedifferent, committer) | |
413 |
|
413 | |||
414 | tzs, tzh, tzm = tz[-5:-4] + b"1", tz[-4:-2], tz[-2:] |
|
414 | tzs, tzh, tzm = tz[-5:-4] + b"1", tz[-4:-2], tz[-2:] | |
415 | tz = -int(tzs) * (int(tzh) * 3600 + int(tzm)) |
|
415 | tz = -int(tzs) * (int(tzh) * 3600 + int(tzm)) | |
416 | date = tm + b" " + (b"%d" % tz) |
|
416 | date = tm + b" " + (b"%d" % tz) | |
417 | saverev = self.ui.configbool(b'convert', b'git.saverev') |
|
417 | saverev = self.ui.configbool(b'convert', b'git.saverev') | |
418 |
|
418 | |||
419 | c = common.commit( |
|
419 | c = common.commit( | |
420 | parents=parents, |
|
420 | parents=parents, | |
421 | date=date, |
|
421 | date=date, | |
422 | author=author, |
|
422 | author=author, | |
423 | desc=message, |
|
423 | desc=message, | |
424 | rev=version, |
|
424 | rev=version, | |
425 | extra=extra, |
|
425 | extra=extra, | |
426 | saverev=saverev, |
|
426 | saverev=saverev, | |
427 | ) |
|
427 | ) | |
428 | return c |
|
428 | return c | |
429 |
|
429 | |||
430 | def numcommits(self): |
|
430 | def numcommits(self): | |
431 | output, ret = self.gitrunlines(b'rev-list', b'--all') |
|
431 | output, ret = self.gitrunlines(b'rev-list', b'--all') | |
432 | if ret: |
|
432 | if ret: | |
433 | raise error.Abort( |
|
433 | raise error.Abort( | |
434 | _(b'cannot retrieve number of commits in %s') % self.path |
|
434 | _(b'cannot retrieve number of commits in %s') % self.path | |
435 | ) |
|
435 | ) | |
436 | return len(output) |
|
436 | return len(output) | |
437 |
|
437 | |||
438 | def gettags(self): |
|
438 | def gettags(self): | |
439 | tags = {} |
|
439 | tags = {} | |
440 | alltags = {} |
|
440 | alltags = {} | |
441 | output, status = self.gitrunlines(b'ls-remote', b'--tags', self.path) |
|
441 | output, status = self.gitrunlines(b'ls-remote', b'--tags', self.path) | |
442 |
|
442 | |||
443 | if status: |
|
443 | if status: | |
444 | raise error.Abort(_(b'cannot read tags from %s') % self.path) |
|
444 | raise error.Abort(_(b'cannot read tags from %s') % self.path) | |
445 | prefix = b'refs/tags/' |
|
445 | prefix = b'refs/tags/' | |
446 |
|
446 | |||
447 | # Build complete list of tags, both annotated and bare ones |
|
447 | # Build complete list of tags, both annotated and bare ones | |
448 | for line in output: |
|
448 | for line in output: | |
449 | line = line.strip() |
|
449 | line = line.strip() | |
450 | if line.startswith(b"error:") or line.startswith(b"fatal:"): |
|
450 | if line.startswith(b"error:") or line.startswith(b"fatal:"): | |
451 | raise error.Abort(_(b'cannot read tags from %s') % self.path) |
|
451 | raise error.Abort(_(b'cannot read tags from %s') % self.path) | |
452 | node, tag = line.split(None, 1) |
|
452 | node, tag = line.split(None, 1) | |
453 | if not tag.startswith(prefix): |
|
453 | if not tag.startswith(prefix): | |
454 | continue |
|
454 | continue | |
455 | alltags[tag[len(prefix) :]] = node |
|
455 | alltags[tag[len(prefix) :]] = node | |
456 |
|
456 | |||
457 | # Filter out tag objects for annotated tag refs |
|
457 | # Filter out tag objects for annotated tag refs | |
458 | for tag in alltags: |
|
458 | for tag in alltags: | |
459 | if tag.endswith(b'^{}'): |
|
459 | if tag.endswith(b'^{}'): | |
460 | tags[tag[:-3]] = alltags[tag] |
|
460 | tags[tag[:-3]] = alltags[tag] | |
461 | else: |
|
461 | else: | |
462 | if tag + b'^{}' in alltags: |
|
462 | if tag + b'^{}' in alltags: | |
463 | continue |
|
463 | continue | |
464 | else: |
|
464 | else: | |
465 | tags[tag] = alltags[tag] |
|
465 | tags[tag] = alltags[tag] | |
466 |
|
466 | |||
467 | return tags |
|
467 | return tags | |
468 |
|
468 | |||
469 | def getchangedfiles(self, version, i): |
|
469 | def getchangedfiles(self, version, i): | |
470 | changes = [] |
|
470 | changes = [] | |
471 | if i is None: |
|
471 | if i is None: | |
472 | output, status = self.gitrunlines( |
|
472 | output, status = self.gitrunlines( | |
473 | b'diff-tree', b'--root', b'-m', b'-r', version |
|
473 | b'diff-tree', b'--root', b'-m', b'-r', version | |
474 | ) |
|
474 | ) | |
475 | if status: |
|
475 | if status: | |
476 | raise error.Abort(_(b'cannot read changes in %s') % version) |
|
476 | raise error.Abort(_(b'cannot read changes in %s') % version) | |
477 | for l in output: |
|
477 | for l in output: | |
478 | if b"\t" not in l: |
|
478 | if b"\t" not in l: | |
479 | continue |
|
479 | continue | |
480 | m, f = l[:-1].split(b"\t") |
|
480 | m, f = l[:-1].split(b"\t") | |
481 | changes.append(f) |
|
481 | changes.append(f) | |
482 | else: |
|
482 | else: | |
483 | output, status = self.gitrunlines( |
|
483 | output, status = self.gitrunlines( | |
484 | b'diff-tree', |
|
484 | b'diff-tree', | |
485 | b'--name-only', |
|
485 | b'--name-only', | |
486 | b'--root', |
|
486 | b'--root', | |
487 | b'-r', |
|
487 | b'-r', | |
488 | version, |
|
488 | version, | |
489 | b'%s^%d' % (version, i + 1), |
|
489 | b'%s^%d' % (version, i + 1), | |
490 | b'--', |
|
490 | b'--', | |
491 | ) |
|
491 | ) | |
492 | if status: |
|
492 | if status: | |
493 | raise error.Abort(_(b'cannot read changes in %s') % version) |
|
493 | raise error.Abort(_(b'cannot read changes in %s') % version) | |
494 | changes = [f.rstrip(b'\n') for f in output] |
|
494 | changes = [f.rstrip(b'\n') for f in output] | |
495 |
|
495 | |||
496 | return changes |
|
496 | return changes | |
497 |
|
497 | |||
498 | def getbookmarks(self): |
|
498 | def getbookmarks(self): | |
499 | bookmarks = {} |
|
499 | bookmarks = {} | |
500 |
|
500 | |||
501 | # Handle local and remote branches |
|
501 | # Handle local and remote branches | |
502 | remoteprefix = self.ui.config(b'convert', b'git.remoteprefix') |
|
502 | remoteprefix = self.ui.config(b'convert', b'git.remoteprefix') | |
503 | reftypes = [ |
|
503 | reftypes = [ | |
504 | # (git prefix, hg prefix) |
|
504 | # (git prefix, hg prefix) | |
505 | (b'refs/remotes/origin/', remoteprefix + b'/'), |
|
505 | (b'refs/remotes/origin/', remoteprefix + b'/'), | |
506 | (b'refs/heads/', b''), |
|
506 | (b'refs/heads/', b''), | |
507 | ] |
|
507 | ] | |
508 |
|
508 | |||
509 | exclude = { |
|
509 | exclude = { | |
510 | b'refs/remotes/origin/HEAD', |
|
510 | b'refs/remotes/origin/HEAD', | |
511 | } |
|
511 | } | |
512 |
|
512 | |||
513 | try: |
|
513 | try: | |
514 | output, status = self.gitrunlines(b'show-ref') |
|
514 | output, status = self.gitrunlines(b'show-ref') | |
515 | for line in output: |
|
515 | for line in output: | |
516 | line = line.strip() |
|
516 | line = line.strip() | |
517 | rev, name = line.split(None, 1) |
|
517 | rev, name = line.split(None, 1) | |
518 | # Process each type of branch |
|
518 | # Process each type of branch | |
519 | for gitprefix, hgprefix in reftypes: |
|
519 | for gitprefix, hgprefix in reftypes: | |
520 | if not name.startswith(gitprefix) or name in exclude: |
|
520 | if not name.startswith(gitprefix) or name in exclude: | |
521 | continue |
|
521 | continue | |
522 | name = b'%s%s' % (hgprefix, name[len(gitprefix) :]) |
|
522 | name = b'%s%s' % (hgprefix, name[len(gitprefix) :]) | |
523 | bookmarks[name] = rev |
|
523 | bookmarks[name] = rev | |
524 | except Exception: |
|
524 | except Exception: | |
525 | pass |
|
525 | pass | |
526 |
|
526 | |||
527 | return bookmarks |
|
527 | return bookmarks | |
528 |
|
528 | |||
529 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
529 | def checkrevformat(self, revstr, mapname=b'splicemap'): | |
530 | """ git revision string is a 40 byte hex """ |
|
530 | """ git revision string is a 40 byte hex """ | |
531 | self.checkhexformat(revstr, mapname) |
|
531 | self.checkhexformat(revstr, mapname) |
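The hunk above finishes the git converter source: everywhere the bare `nullhex` digest was stored or compared, the new code reaches it through `sha1nodeconstants.nullhex`, so the SHA-1-specific constant lives behind one namespace instead of a loose module-level import. A minimal sketch of the spelling change, assuming a Mercurial new enough to export `sha1nodeconstants` from `mercurial.node` (illustrative only, not converter code):

    # old spelling: a loose module-level constant
    #     from mercurial.node import nullhex
    # new spelling: the digest hangs off the hash-constants namespace
    from mercurial.node import sha1nodeconstants

    # the null revision's hex digest is the 40-character all-zero string
    assert sha1nodeconstants.nullhex == b'0' * 40

The next hunk applies the same treatment to hg.py, the Mercurial backend of the convert extension.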
@@ -1,733 +1,732 @@
1 | # hg.py - hg backend for convert extension |
|
1 | # hg.py - hg backend for convert extension | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | # Notes for hg->hg conversion: |
|
8 | # Notes for hg->hg conversion: | |
9 | # |
|
9 | # | |
10 | # * Old versions of Mercurial didn't trim the whitespace from the ends |
|
10 | # * Old versions of Mercurial didn't trim the whitespace from the ends | |
11 | # of commit messages, but new versions do. Changesets created by |
|
11 | # of commit messages, but new versions do. Changesets created by | |
12 | # those older versions, then converted, may thus have different |
|
12 | # those older versions, then converted, may thus have different | |
13 | # hashes for changesets that are otherwise identical. |
|
13 | # hashes for changesets that are otherwise identical. | |
14 | # |
|
14 | # | |
15 | # * Using "--config convert.hg.saverev=true" will make the source |
|
15 | # * Using "--config convert.hg.saverev=true" will make the source | |
16 | # identifier to be stored in the converted revision. This will cause |
|
16 | # identifier to be stored in the converted revision. This will cause | |
17 | # the converted revision to have a different identity than the |
|
17 | # the converted revision to have a different identity than the | |
18 | # source. |
|
18 | # source. | |
19 | from __future__ import absolute_import |
|
19 | from __future__ import absolute_import | |
20 |
|
20 | |||
21 | import os |
|
21 | import os | |
22 | import re |
|
22 | import re | |
23 | import time |
|
23 | import time | |
24 |
|
24 | |||
25 | from mercurial.i18n import _ |
|
25 | from mercurial.i18n import _ | |
26 | from mercurial.pycompat import open |
|
26 | from mercurial.pycompat import open | |
27 | from mercurial.node import ( |
|
27 | from mercurial.node import ( | |
28 | bin, |
|
28 | bin, | |
29 | hex, |
|
29 | hex, | |
30 | nullhex, |
|
30 | sha1nodeconstants, | |
31 | nullid, |
|
|||
32 | ) |
|
31 | ) | |
33 | from mercurial import ( |
|
32 | from mercurial import ( | |
34 | bookmarks, |
|
33 | bookmarks, | |
35 | context, |
|
34 | context, | |
36 | error, |
|
35 | error, | |
37 | exchange, |
|
36 | exchange, | |
38 | hg, |
|
37 | hg, | |
39 | lock as lockmod, |
|
38 | lock as lockmod, | |
40 | merge as mergemod, |
|
39 | merge as mergemod, | |
41 | phases, |
|
40 | phases, | |
42 | pycompat, |
|
41 | pycompat, | |
43 | scmutil, |
|
42 | scmutil, | |
44 | util, |
|
43 | util, | |
45 | ) |
|
44 | ) | |
46 | from mercurial.utils import dateutil |
|
45 | from mercurial.utils import dateutil | |
47 |
|
46 | |||
48 | stringio = util.stringio |
|
47 | stringio = util.stringio | |
49 |
|
48 | |||
50 | from . import common |
|
49 | from . import common | |
51 |
|
50 | |||
52 | mapfile = common.mapfile |
|
51 | mapfile = common.mapfile | |
53 | NoRepo = common.NoRepo |
|
52 | NoRepo = common.NoRepo | |
54 |
|
53 | |||
55 | sha1re = re.compile(br'\b[0-9a-f]{12,40}\b') |
|
54 | sha1re = re.compile(br'\b[0-9a-f]{12,40}\b') | |
56 |
|
55 | |||
57 |
|
56 | |||
58 | class mercurial_sink(common.converter_sink): |
|
57 | class mercurial_sink(common.converter_sink): | |
59 | def __init__(self, ui, repotype, path): |
|
58 | def __init__(self, ui, repotype, path): | |
60 | common.converter_sink.__init__(self, ui, repotype, path) |
|
59 | common.converter_sink.__init__(self, ui, repotype, path) | |
61 | self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames') |
|
60 | self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames') | |
62 | self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches') |
|
61 | self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches') | |
63 | self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch') |
|
62 | self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch') | |
64 | self.lastbranch = None |
|
63 | self.lastbranch = None | |
65 | if os.path.isdir(path) and len(os.listdir(path)) > 0: |
|
64 | if os.path.isdir(path) and len(os.listdir(path)) > 0: | |
66 | try: |
|
65 | try: | |
67 | self.repo = hg.repository(self.ui, path) |
|
66 | self.repo = hg.repository(self.ui, path) | |
68 | if not self.repo.local(): |
|
67 | if not self.repo.local(): | |
69 | raise NoRepo( |
|
68 | raise NoRepo( | |
70 | _(b'%s is not a local Mercurial repository') % path |
|
69 | _(b'%s is not a local Mercurial repository') % path | |
71 | ) |
|
70 | ) | |
72 | except error.RepoError as err: |
|
71 | except error.RepoError as err: | |
73 | ui.traceback() |
|
72 | ui.traceback() | |
74 | raise NoRepo(err.args[0]) |
|
73 | raise NoRepo(err.args[0]) | |
75 | else: |
|
74 | else: | |
76 | try: |
|
75 | try: | |
77 | ui.status(_(b'initializing destination %s repository\n') % path) |
|
76 | ui.status(_(b'initializing destination %s repository\n') % path) | |
78 | self.repo = hg.repository(self.ui, path, create=True) |
|
77 | self.repo = hg.repository(self.ui, path, create=True) | |
79 | if not self.repo.local(): |
|
78 | if not self.repo.local(): | |
80 | raise NoRepo( |
|
79 | raise NoRepo( | |
81 | _(b'%s is not a local Mercurial repository') % path |
|
80 | _(b'%s is not a local Mercurial repository') % path | |
82 | ) |
|
81 | ) | |
83 | self.created.append(path) |
|
82 | self.created.append(path) | |
84 | except error.RepoError: |
|
83 | except error.RepoError: | |
85 | ui.traceback() |
|
84 | ui.traceback() | |
86 | raise NoRepo( |
|
85 | raise NoRepo( | |
87 | _(b"could not create hg repository %s as sink") % path |
|
86 | _(b"could not create hg repository %s as sink") % path | |
88 | ) |
|
87 | ) | |
89 | self.lock = None |
|
88 | self.lock = None | |
90 | self.wlock = None |
|
89 | self.wlock = None | |
91 | self.filemapmode = False |
|
90 | self.filemapmode = False | |
92 | self.subrevmaps = {} |
|
91 | self.subrevmaps = {} | |
93 |
|
92 | |||
94 | def before(self): |
|
93 | def before(self): | |
95 | self.ui.debug(b'run hg sink pre-conversion action\n') |
|
94 | self.ui.debug(b'run hg sink pre-conversion action\n') | |
96 | self.wlock = self.repo.wlock() |
|
95 | self.wlock = self.repo.wlock() | |
97 | self.lock = self.repo.lock() |
|
96 | self.lock = self.repo.lock() | |
98 |
|
97 | |||
99 | def after(self): |
|
98 | def after(self): | |
100 | self.ui.debug(b'run hg sink post-conversion action\n') |
|
99 | self.ui.debug(b'run hg sink post-conversion action\n') | |
101 | if self.lock: |
|
100 | if self.lock: | |
102 | self.lock.release() |
|
101 | self.lock.release() | |
103 | if self.wlock: |
|
102 | if self.wlock: | |
104 | self.wlock.release() |
|
103 | self.wlock.release() | |
105 |
|
104 | |||
106 | def revmapfile(self): |
|
105 | def revmapfile(self): | |
107 | return self.repo.vfs.join(b"shamap") |
|
106 | return self.repo.vfs.join(b"shamap") | |
108 |
|
107 | |||
109 | def authorfile(self): |
|
108 | def authorfile(self): | |
110 | return self.repo.vfs.join(b"authormap") |
|
109 | return self.repo.vfs.join(b"authormap") | |
111 |
|
110 | |||
112 | def setbranch(self, branch, pbranches): |
|
111 | def setbranch(self, branch, pbranches): | |
113 | if not self.clonebranches: |
|
112 | if not self.clonebranches: | |
114 | return |
|
113 | return | |
115 |
|
114 | |||
116 | setbranch = branch != self.lastbranch |
|
115 | setbranch = branch != self.lastbranch | |
117 | self.lastbranch = branch |
|
116 | self.lastbranch = branch | |
118 | if not branch: |
|
117 | if not branch: | |
119 | branch = b'default' |
|
118 | branch = b'default' | |
120 | pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches] |
|
119 | pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches] | |
121 |
|
120 | |||
122 | branchpath = os.path.join(self.path, branch) |
|
121 | branchpath = os.path.join(self.path, branch) | |
123 | if setbranch: |
|
122 | if setbranch: | |
124 | self.after() |
|
123 | self.after() | |
125 | try: |
|
124 | try: | |
126 | self.repo = hg.repository(self.ui, branchpath) |
|
125 | self.repo = hg.repository(self.ui, branchpath) | |
127 | except Exception: |
|
126 | except Exception: | |
128 | self.repo = hg.repository(self.ui, branchpath, create=True) |
|
127 | self.repo = hg.repository(self.ui, branchpath, create=True) | |
129 | self.before() |
|
128 | self.before() | |
130 |
|
129 | |||
131 | # pbranches may bring revisions from other branches (merge parents) |
|
130 | # pbranches may bring revisions from other branches (merge parents) | |
132 | # Make sure we have them, or pull them. |
|
131 | # Make sure we have them, or pull them. | |
133 | missings = {} |
|
132 | missings = {} | |
134 | for b in pbranches: |
|
133 | for b in pbranches: | |
135 | try: |
|
134 | try: | |
136 | self.repo.lookup(b[0]) |
|
135 | self.repo.lookup(b[0]) | |
137 | except Exception: |
|
136 | except Exception: | |
138 | missings.setdefault(b[1], []).append(b[0]) |
|
137 | missings.setdefault(b[1], []).append(b[0]) | |
139 |
|
138 | |||
140 | if missings: |
|
139 | if missings: | |
141 | self.after() |
|
140 | self.after() | |
142 | for pbranch, heads in sorted(pycompat.iteritems(missings)): |
|
141 | for pbranch, heads in sorted(pycompat.iteritems(missings)): | |
143 | pbranchpath = os.path.join(self.path, pbranch) |
|
142 | pbranchpath = os.path.join(self.path, pbranch) | |
144 | prepo = hg.peer(self.ui, {}, pbranchpath) |
|
143 | prepo = hg.peer(self.ui, {}, pbranchpath) | |
145 | self.ui.note( |
|
144 | self.ui.note( | |
146 | _(b'pulling from %s into %s\n') % (pbranch, branch) |
|
145 | _(b'pulling from %s into %s\n') % (pbranch, branch) | |
147 | ) |
|
146 | ) | |
148 | exchange.pull( |
|
147 | exchange.pull( | |
149 | self.repo, prepo, [prepo.lookup(h) for h in heads] |
|
148 | self.repo, prepo, [prepo.lookup(h) for h in heads] | |
150 | ) |
|
149 | ) | |
151 | self.before() |
|
150 | self.before() | |
152 |
|
151 | |||
153 | def _rewritetags(self, source, revmap, data): |
|
152 | def _rewritetags(self, source, revmap, data): | |
154 | fp = stringio() |
|
153 | fp = stringio() | |
155 | for line in data.splitlines(): |
|
154 | for line in data.splitlines(): | |
156 | s = line.split(b' ', 1) |
|
155 | s = line.split(b' ', 1) | |
157 | if len(s) != 2: |
|
156 | if len(s) != 2: | |
158 | self.ui.warn(_(b'invalid tag entry: "%s"\n') % line) |
|
157 | self.ui.warn(_(b'invalid tag entry: "%s"\n') % line) | |
159 | fp.write(b'%s\n' % line) # Bogus, but keep for hash stability |
|
158 | fp.write(b'%s\n' % line) # Bogus, but keep for hash stability | |
160 | continue |
|
159 | continue | |
161 | revid = revmap.get(source.lookuprev(s[0])) |
|
160 | revid = revmap.get(source.lookuprev(s[0])) | |
162 | if not revid: |
|
161 | if not revid: | |
163 | if s[0] == nullhex: |
|
162 | if s[0] == sha1nodeconstants.nullhex: | |
164 | revid = s[0] |
|
163 | revid = s[0] | |
165 | else: |
|
164 | else: | |
166 | # missing, but keep for hash stability |
|
165 | # missing, but keep for hash stability | |
167 | self.ui.warn(_(b'missing tag entry: "%s"\n') % line) |
|
166 | self.ui.warn(_(b'missing tag entry: "%s"\n') % line) | |
168 | fp.write(b'%s\n' % line) |
|
167 | fp.write(b'%s\n' % line) | |
169 | continue |
|
168 | continue | |
170 | fp.write(b'%s %s\n' % (revid, s[1])) |
|
169 | fp.write(b'%s %s\n' % (revid, s[1])) | |
171 | return fp.getvalue() |
|
170 | return fp.getvalue() | |
172 |
|
171 | |||
173 | def _rewritesubstate(self, source, data): |
|
172 | def _rewritesubstate(self, source, data): | |
174 | fp = stringio() |
|
173 | fp = stringio() | |
175 | for line in data.splitlines(): |
|
174 | for line in data.splitlines(): | |
176 | s = line.split(b' ', 1) |
|
175 | s = line.split(b' ', 1) | |
177 | if len(s) != 2: |
|
176 | if len(s) != 2: | |
178 | continue |
|
177 | continue | |
179 |
|
178 | |||
180 | revid = s[0] |
|
179 | revid = s[0] | |
181 | subpath = s[1] |
|
180 | subpath = s[1] | |
182 | if revid != nullhex: |
|
181 | if revid != sha1nodeconstants.nullhex: | |
183 | revmap = self.subrevmaps.get(subpath) |
|
182 | revmap = self.subrevmaps.get(subpath) | |
184 | if revmap is None: |
|
183 | if revmap is None: | |
185 | revmap = mapfile( |
|
184 | revmap = mapfile( | |
186 | self.ui, self.repo.wjoin(subpath, b'.hg/shamap') |
|
185 | self.ui, self.repo.wjoin(subpath, b'.hg/shamap') | |
187 | ) |
|
186 | ) | |
188 | self.subrevmaps[subpath] = revmap |
|
187 | self.subrevmaps[subpath] = revmap | |
189 |
|
188 | |||
190 | # It is reasonable that one or more of the subrepos don't |
|
189 | # It is reasonable that one or more of the subrepos don't | |
191 | # need to be converted, in which case they can be cloned |
|
190 | # need to be converted, in which case they can be cloned | |
192 | # into place instead of converted. Therefore, only warn |
|
191 | # into place instead of converted. Therefore, only warn | |
193 | # once. |
|
192 | # once. | |
194 | msg = _(b'no ".hgsubstate" updates will be made for "%s"\n') |
|
193 | msg = _(b'no ".hgsubstate" updates will be made for "%s"\n') | |
195 | if len(revmap) == 0: |
|
194 | if len(revmap) == 0: | |
196 | sub = self.repo.wvfs.reljoin(subpath, b'.hg') |
|
195 | sub = self.repo.wvfs.reljoin(subpath, b'.hg') | |
197 |
|
196 | |||
198 | if self.repo.wvfs.exists(sub): |
|
197 | if self.repo.wvfs.exists(sub): | |
199 | self.ui.warn(msg % subpath) |
|
198 | self.ui.warn(msg % subpath) | |
200 |
|
199 | |||
201 | newid = revmap.get(revid) |
|
200 | newid = revmap.get(revid) | |
202 | if not newid: |
|
201 | if not newid: | |
203 | if len(revmap) > 0: |
|
202 | if len(revmap) > 0: | |
204 | self.ui.warn( |
|
203 | self.ui.warn( | |
205 | _(b"%s is missing from %s/.hg/shamap\n") |
|
204 | _(b"%s is missing from %s/.hg/shamap\n") | |
206 | % (revid, subpath) |
|
205 | % (revid, subpath) | |
207 | ) |
|
206 | ) | |
208 | else: |
|
207 | else: | |
209 | revid = newid |
|
208 | revid = newid | |
210 |
|
209 | |||
211 | fp.write(b'%s %s\n' % (revid, subpath)) |
|
210 | fp.write(b'%s %s\n' % (revid, subpath)) | |
212 |
|
211 | |||
213 | return fp.getvalue() |
|
212 | return fp.getvalue() | |
214 |
|
213 | |||
215 | def _calculatemergedfiles(self, source, p1ctx, p2ctx): |
|
214 | def _calculatemergedfiles(self, source, p1ctx, p2ctx): | |
216 | """Calculates the files from p2 that we need to pull in when merging p1 |
|
215 | """Calculates the files from p2 that we need to pull in when merging p1 | |
217 | and p2, given that the merge is coming from the given source. |
|
216 | and p2, given that the merge is coming from the given source. | |
218 |
|
217 | |||
219 | This prevents us from losing files that only exist in the target p2 and |
|
218 | This prevents us from losing files that only exist in the target p2 and | |
220 | that don't come from the source repo (like if you're merging multiple |
|
219 | that don't come from the source repo (like if you're merging multiple | |
221 | repositories together). |
|
220 | repositories together). | |
222 | """ |
|
221 | """ | |
223 | anc = [p1ctx.ancestor(p2ctx)] |
|
222 | anc = [p1ctx.ancestor(p2ctx)] | |
224 | # Calculate what files are coming from p2 |
|
223 | # Calculate what files are coming from p2 | |
225 | # TODO: mresult.commitinfo might be able to get that info |
|
224 | # TODO: mresult.commitinfo might be able to get that info | |
226 | mresult = mergemod.calculateupdates( |
|
225 | mresult = mergemod.calculateupdates( | |
227 | self.repo, |
|
226 | self.repo, | |
228 | p1ctx, |
|
227 | p1ctx, | |
229 | p2ctx, |
|
228 | p2ctx, | |
230 | anc, |
|
229 | anc, | |
231 | branchmerge=True, |
|
230 | branchmerge=True, | |
232 | force=True, |
|
231 | force=True, | |
233 | acceptremote=False, |
|
232 | acceptremote=False, | |
234 | followcopies=False, |
|
233 | followcopies=False, | |
235 | ) |
|
234 | ) | |
236 |
|
235 | |||
237 | for file, (action, info, msg) in mresult.filemap(): |
|
236 | for file, (action, info, msg) in mresult.filemap(): | |
238 | if source.targetfilebelongstosource(file): |
|
237 | if source.targetfilebelongstosource(file): | |
239 | # If the file belongs to the source repo, ignore the p2 |
|
238 | # If the file belongs to the source repo, ignore the p2 | |
240 | # since it will be covered by the existing fileset. |
|
239 | # since it will be covered by the existing fileset. | |
241 | continue |
|
240 | continue | |
242 |
|
241 | |||
243 | # If the file requires actual merging, abort. We don't have enough |
|
242 | # If the file requires actual merging, abort. We don't have enough | |
244 | # context to resolve merges correctly. |
|
243 | # context to resolve merges correctly. | |
245 | if action in [b'm', b'dm', b'cd', b'dc']: |
|
244 | if action in [b'm', b'dm', b'cd', b'dc']: | |
246 | raise error.Abort( |
|
245 | raise error.Abort( | |
247 | _( |
|
246 | _( | |
248 | b"unable to convert merge commit " |
|
247 | b"unable to convert merge commit " | |
249 | b"since target parents do not merge cleanly (file " |
|
248 | b"since target parents do not merge cleanly (file " | |
250 | b"%s, parents %s and %s)" |
|
249 | b"%s, parents %s and %s)" | |
251 | ) |
|
250 | ) | |
252 | % (file, p1ctx, p2ctx) |
|
251 | % (file, p1ctx, p2ctx) | |
253 | ) |
|
252 | ) | |
254 | elif action == b'k': |
|
253 | elif action == b'k': | |
255 | # 'keep' means nothing changed from p1 |
|
254 | # 'keep' means nothing changed from p1 | |
256 | continue |
|
255 | continue | |
257 | else: |
|
256 | else: | |
258 | # Any other change means we want to take the p2 version |
|
257 | # Any other change means we want to take the p2 version | |
259 | yield file |
|
258 | yield file | |
260 |
|
259 | |||
261 | def putcommit( |
|
260 | def putcommit( | |
262 | self, files, copies, parents, commit, source, revmap, full, cleanp2 |
|
261 | self, files, copies, parents, commit, source, revmap, full, cleanp2 | |
263 | ): |
|
262 | ): | |
264 | files = dict(files) |
|
263 | files = dict(files) | |
265 |
|
264 | |||
266 | def getfilectx(repo, memctx, f): |
|
265 | def getfilectx(repo, memctx, f): | |
267 | if p2ctx and f in p2files and f not in copies: |
|
266 | if p2ctx and f in p2files and f not in copies: | |
268 | self.ui.debug(b'reusing %s from p2\n' % f) |
|
267 | self.ui.debug(b'reusing %s from p2\n' % f) | |
269 | try: |
|
268 | try: | |
270 | return p2ctx[f] |
|
269 | return p2ctx[f] | |
271 | except error.ManifestLookupError: |
|
270 | except error.ManifestLookupError: | |
272 | # If the file doesn't exist in p2, then we're syncing a |
|
271 | # If the file doesn't exist in p2, then we're syncing a | |
273 | # delete, so just return None. |
|
272 | # delete, so just return None. | |
274 | return None |
|
273 | return None | |
275 | try: |
|
274 | try: | |
276 | v = files[f] |
|
275 | v = files[f] | |
277 | except KeyError: |
|
276 | except KeyError: | |
278 | return None |
|
277 | return None | |
279 | data, mode = source.getfile(f, v) |
|
278 | data, mode = source.getfile(f, v) | |
280 | if data is None: |
|
279 | if data is None: | |
281 | return None |
|
280 | return None | |
282 | if f == b'.hgtags': |
|
281 | if f == b'.hgtags': | |
283 | data = self._rewritetags(source, revmap, data) |
|
282 | data = self._rewritetags(source, revmap, data) | |
284 | if f == b'.hgsubstate': |
|
283 | if f == b'.hgsubstate': | |
285 | data = self._rewritesubstate(source, data) |
|
284 | data = self._rewritesubstate(source, data) | |
286 | return context.memfilectx( |
|
285 | return context.memfilectx( | |
287 | self.repo, |
|
286 | self.repo, | |
288 | memctx, |
|
287 | memctx, | |
289 | f, |
|
288 | f, | |
290 | data, |
|
289 | data, | |
291 | b'l' in mode, |
|
290 | b'l' in mode, | |
292 | b'x' in mode, |
|
291 | b'x' in mode, | |
293 | copies.get(f), |
|
292 | copies.get(f), | |
294 | ) |
|
293 | ) | |
295 |
|
294 | |||
296 | pl = [] |
|
295 | pl = [] | |
297 | for p in parents: |
|
296 | for p in parents: | |
298 | if p not in pl: |
|
297 | if p not in pl: | |
299 | pl.append(p) |
|
298 | pl.append(p) | |
300 | parents = pl |
|
299 | parents = pl | |
301 | nparents = len(parents) |
|
300 | nparents = len(parents) | |
302 | if self.filemapmode and nparents == 1: |
|
301 | if self.filemapmode and nparents == 1: | |
303 | m1node = self.repo.changelog.read(bin(parents[0]))[0] |
|
302 | m1node = self.repo.changelog.read(bin(parents[0]))[0] | |
304 | parent = parents[0] |
|
303 | parent = parents[0] | |
305 |
|
304 | |||
306 | if len(parents) < 2: |
|
305 | if len(parents) < 2: | |
307 | parents.append(nullid) |
|
306 | parents.append(self.repo.nullid) | |
308 | if len(parents) < 2: |
|
307 | if len(parents) < 2: | |
309 | parents.append(nullid) |
|
308 | parents.append(self.repo.nullid) | |
310 | p2 = parents.pop(0) |
|
309 | p2 = parents.pop(0) | |
311 |
|
310 | |||
312 | text = commit.desc |
|
311 | text = commit.desc | |
313 |
|
312 | |||
314 | sha1s = re.findall(sha1re, text) |
|
313 | sha1s = re.findall(sha1re, text) | |
315 | for sha1 in sha1s: |
|
314 | for sha1 in sha1s: | |
316 | oldrev = source.lookuprev(sha1) |
|
315 | oldrev = source.lookuprev(sha1) | |
317 | newrev = revmap.get(oldrev) |
|
316 | newrev = revmap.get(oldrev) | |
318 | if newrev is not None: |
|
317 | if newrev is not None: | |
319 | text = text.replace(sha1, newrev[: len(sha1)]) |
|
318 | text = text.replace(sha1, newrev[: len(sha1)]) | |
320 |
|
319 | |||
321 | extra = commit.extra.copy() |
|
320 | extra = commit.extra.copy() | |
322 |
|
321 | |||
323 | sourcename = self.repo.ui.config(b'convert', b'hg.sourcename') |
|
322 | sourcename = self.repo.ui.config(b'convert', b'hg.sourcename') | |
324 | if sourcename: |
|
323 | if sourcename: | |
325 | extra[b'convert_source'] = sourcename |
|
324 | extra[b'convert_source'] = sourcename | |
326 |
|
325 | |||
327 | for label in ( |
|
326 | for label in ( | |
328 | b'source', |
|
327 | b'source', | |
329 | b'transplant_source', |
|
328 | b'transplant_source', | |
330 | b'rebase_source', |
|
329 | b'rebase_source', | |
331 | b'intermediate-source', |
|
330 | b'intermediate-source', | |
332 | ): |
|
331 | ): | |
333 | node = extra.get(label) |
|
332 | node = extra.get(label) | |
334 |
|
333 | |||
335 | if node is None: |
|
334 | if node is None: | |
336 | continue |
|
335 | continue | |
337 |
|
336 | |||
338 | # Only transplant stores its reference in binary |
|
337 | # Only transplant stores its reference in binary | |
339 | if label == b'transplant_source': |
|
338 | if label == b'transplant_source': | |
340 | node = hex(node) |
|
339 | node = hex(node) | |
341 |
|
340 | |||
342 | newrev = revmap.get(node) |
|
341 | newrev = revmap.get(node) | |
343 | if newrev is not None: |
|
342 | if newrev is not None: | |
344 | if label == b'transplant_source': |
|
343 | if label == b'transplant_source': | |
345 | newrev = bin(newrev) |
|
344 | newrev = bin(newrev) | |
346 |
|
345 | |||
347 | extra[label] = newrev |
|
346 | extra[label] = newrev | |
348 |
|
347 | |||
349 | if self.branchnames and commit.branch: |
|
348 | if self.branchnames and commit.branch: | |
350 | extra[b'branch'] = commit.branch |
|
349 | extra[b'branch'] = commit.branch | |
351 | if commit.rev and commit.saverev: |
|
350 | if commit.rev and commit.saverev: | |
352 | extra[b'convert_revision'] = commit.rev |
|
351 | extra[b'convert_revision'] = commit.rev | |
353 |
|
352 | |||
354 | while parents: |
|
353 | while parents: | |
355 | p1 = p2 |
|
354 | p1 = p2 | |
356 | p2 = parents.pop(0) |
|
355 | p2 = parents.pop(0) | |
357 | p1ctx = self.repo[p1] |
|
356 | p1ctx = self.repo[p1] | |
358 | p2ctx = None |
|
357 | p2ctx = None | |
359 | if p2 != nullid: |
|
358 | if p2 != self.repo.nullid: | |
360 | p2ctx = self.repo[p2] |
|
359 | p2ctx = self.repo[p2] | |
361 | fileset = set(files) |
|
360 | fileset = set(files) | |
362 | if full: |
|
361 | if full: | |
363 | fileset.update(self.repo[p1]) |
|
362 | fileset.update(self.repo[p1]) | |
364 | fileset.update(self.repo[p2]) |
|
363 | fileset.update(self.repo[p2]) | |
365 |
|
364 | |||
366 | if p2ctx: |
|
365 | if p2ctx: | |
367 | p2files = set(cleanp2) |
|
366 | p2files = set(cleanp2) | |
368 | for file in self._calculatemergedfiles(source, p1ctx, p2ctx): |
|
367 | for file in self._calculatemergedfiles(source, p1ctx, p2ctx): | |
369 | p2files.add(file) |
|
368 | p2files.add(file) | |
370 | fileset.add(file) |
|
369 | fileset.add(file) | |
371 |
|
370 | |||
372 | ctx = context.memctx( |
|
371 | ctx = context.memctx( | |
373 | self.repo, |
|
372 | self.repo, | |
374 | (p1, p2), |
|
373 | (p1, p2), | |
375 | text, |
|
374 | text, | |
376 | fileset, |
|
375 | fileset, | |
377 | getfilectx, |
|
376 | getfilectx, | |
378 | commit.author, |
|
377 | commit.author, | |
379 | commit.date, |
|
378 | commit.date, | |
380 | extra, |
|
379 | extra, | |
381 | ) |
|
380 | ) | |
382 |
|
381 | |||
383 | # We won't know if the conversion changes the node until after the |
|
382 | # We won't know if the conversion changes the node until after the | |
384 | # commit, so copy the source's phase for now. |
|
383 | # commit, so copy the source's phase for now. | |
385 | self.repo.ui.setconfig( |
|
384 | self.repo.ui.setconfig( | |
386 | b'phases', |
|
385 | b'phases', | |
387 | b'new-commit', |
|
386 | b'new-commit', | |
388 | phases.phasenames[commit.phase], |
|
387 | phases.phasenames[commit.phase], | |
389 | b'convert', |
|
388 | b'convert', | |
390 | ) |
|
389 | ) | |
391 |
|
390 | |||
392 | with self.repo.transaction(b"convert") as tr: |
|
391 | with self.repo.transaction(b"convert") as tr: | |
393 | if self.repo.ui.config(b'convert', b'hg.preserve-hash'): |
|
392 | if self.repo.ui.config(b'convert', b'hg.preserve-hash'): | |
394 | origctx = commit.ctx |
|
393 | origctx = commit.ctx | |
395 | else: |
|
394 | else: | |
396 | origctx = None |
|
395 | origctx = None | |
397 | node = hex(self.repo.commitctx(ctx, origctx=origctx)) |
|
396 | node = hex(self.repo.commitctx(ctx, origctx=origctx)) | |
398 |
|
397 | |||
399 | # If the node value has changed, but the phase is lower than |
|
398 | # If the node value has changed, but the phase is lower than | |
400 | # draft, set it back to draft since it hasn't been exposed |
|
399 | # draft, set it back to draft since it hasn't been exposed | |
401 | # anywhere. |
|
400 | # anywhere. | |
402 | if commit.rev != node: |
|
401 | if commit.rev != node: | |
403 | ctx = self.repo[node] |
|
402 | ctx = self.repo[node] | |
404 | if ctx.phase() < phases.draft: |
|
403 | if ctx.phase() < phases.draft: | |
405 | phases.registernew( |
|
404 | phases.registernew( | |
406 | self.repo, tr, phases.draft, [ctx.rev()] |
|
405 | self.repo, tr, phases.draft, [ctx.rev()] | |
407 | ) |
|
406 | ) | |
408 |
|
407 | |||
409 | text = b"(octopus merge fixup)\n" |
|
408 | text = b"(octopus merge fixup)\n" | |
410 | p2 = node |
|
409 | p2 = node | |
411 |
|
410 | |||
412 | if self.filemapmode and nparents == 1: |
|
411 | if self.filemapmode and nparents == 1: | |
413 | man = self.repo.manifestlog.getstorage(b'') |
|
412 | man = self.repo.manifestlog.getstorage(b'') | |
414 | mnode = self.repo.changelog.read(bin(p2))[0] |
|
413 | mnode = self.repo.changelog.read(bin(p2))[0] | |
415 | closed = b'close' in commit.extra |
|
414 | closed = b'close' in commit.extra | |
416 | if not closed and not man.cmp(m1node, man.revision(mnode)): |
|
415 | if not closed and not man.cmp(m1node, man.revision(mnode)): | |
417 | self.ui.status(_(b"filtering out empty revision\n")) |
|
416 | self.ui.status(_(b"filtering out empty revision\n")) | |
418 | self.repo.rollback(force=True) |
|
417 | self.repo.rollback(force=True) | |
419 | return parent |
|
418 | return parent | |
420 | return p2 |
|
419 | return p2 | |
421 |
|
420 | |||
422 | def puttags(self, tags): |
|
421 | def puttags(self, tags): | |
423 | tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True) |
|
422 | tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True) | |
424 | tagparent = tagparent or nullid |
|
423 | tagparent = tagparent or self.repo.nullid | |
425 |
|
424 | |||
426 | oldlines = set() |
|
425 | oldlines = set() | |
427 | for branch, heads in pycompat.iteritems(self.repo.branchmap()): |
|
426 | for branch, heads in pycompat.iteritems(self.repo.branchmap()): | |
428 | for h in heads: |
|
427 | for h in heads: | |
429 | if b'.hgtags' in self.repo[h]: |
|
428 | if b'.hgtags' in self.repo[h]: | |
430 | oldlines.update( |
|
429 | oldlines.update( | |
431 | set(self.repo[h][b'.hgtags'].data().splitlines(True)) |
|
430 | set(self.repo[h][b'.hgtags'].data().splitlines(True)) | |
432 | ) |
|
431 | ) | |
433 | oldlines = sorted(list(oldlines)) |
|
432 | oldlines = sorted(list(oldlines)) | |
434 |
|
433 | |||
435 | newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags]) |
|
434 | newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags]) | |
436 | if newlines == oldlines: |
|
435 | if newlines == oldlines: | |
437 | return None, None |
|
436 | return None, None | |
438 |
|
437 | |||
439 | # if the old and new tags match, then there is nothing to update |
|
438 | # if the old and new tags match, then there is nothing to update | |
440 | oldtags = set() |
|
439 | oldtags = set() | |
441 | newtags = set() |
|
440 | newtags = set() | |
442 | for line in oldlines: |
|
441 | for line in oldlines: | |
443 | s = line.strip().split(b' ', 1) |
|
442 | s = line.strip().split(b' ', 1) | |
444 | if len(s) != 2: |
|
443 | if len(s) != 2: | |
445 | continue |
|
444 | continue | |
446 | oldtags.add(s[1]) |
|
445 | oldtags.add(s[1]) | |
447 | for line in newlines: |
|
446 | for line in newlines: | |
448 | s = line.strip().split(b' ', 1) |
|
447 | s = line.strip().split(b' ', 1) | |
449 | if len(s) != 2: |
|
448 | if len(s) != 2: | |
450 | continue |
|
449 | continue | |
451 | if s[1] not in oldtags: |
|
450 | if s[1] not in oldtags: | |
452 | newtags.add(s[1].strip()) |
|
451 | newtags.add(s[1].strip()) | |
453 |
|
452 | |||
454 | if not newtags: |
|
453 | if not newtags: | |
455 | return None, None |
|
454 | return None, None | |
456 |
|
455 | |||
457 | data = b"".join(newlines) |
|
456 | data = b"".join(newlines) | |
458 |
|
457 | |||
459 | def getfilectx(repo, memctx, f): |
|
458 | def getfilectx(repo, memctx, f): | |
460 | return context.memfilectx(repo, memctx, f, data, False, False, None) |
|
459 | return context.memfilectx(repo, memctx, f, data, False, False, None) | |
461 |
|
460 | |||
462 | self.ui.status(_(b"updating tags\n")) |
|
461 | self.ui.status(_(b"updating tags\n")) | |
463 | date = b"%d 0" % int(time.mktime(time.gmtime())) |
|
462 | date = b"%d 0" % int(time.mktime(time.gmtime())) | |
464 | extra = {b'branch': self.tagsbranch} |
|
463 | extra = {b'branch': self.tagsbranch} | |
465 | ctx = context.memctx( |
|
464 | ctx = context.memctx( | |
466 | self.repo, |
|
465 | self.repo, | |
467 | (tagparent, None), |
|
466 | (tagparent, None), | |
468 | b"update tags", |
|
467 | b"update tags", | |
469 | [b".hgtags"], |
|
468 | [b".hgtags"], | |
470 | getfilectx, |
|
469 | getfilectx, | |
471 | b"convert-repo", |
|
470 | b"convert-repo", | |
472 | date, |
|
471 | date, | |
473 | extra, |
|
472 | extra, | |
474 | ) |
|
473 | ) | |
475 | node = self.repo.commitctx(ctx) |
|
474 | node = self.repo.commitctx(ctx) | |
476 | return hex(node), hex(tagparent) |
|
475 | return hex(node), hex(tagparent) | |
477 |
|
476 | |||
478 | def setfilemapmode(self, active): |
|
477 | def setfilemapmode(self, active): | |
479 | self.filemapmode = active |
|
478 | self.filemapmode = active | |
480 |
|
479 | |||
481 | def putbookmarks(self, updatedbookmark): |
|
480 | def putbookmarks(self, updatedbookmark): | |
482 | if not len(updatedbookmark): |
|
481 | if not len(updatedbookmark): | |
483 | return |
|
482 | return | |
484 | wlock = lock = tr = None |
|
483 | wlock = lock = tr = None | |
485 | try: |
|
484 | try: | |
486 | wlock = self.repo.wlock() |
|
485 | wlock = self.repo.wlock() | |
487 | lock = self.repo.lock() |
|
486 | lock = self.repo.lock() | |
488 | tr = self.repo.transaction(b'bookmark') |
|
487 | tr = self.repo.transaction(b'bookmark') | |
489 | self.ui.status(_(b"updating bookmarks\n")) |
|
488 | self.ui.status(_(b"updating bookmarks\n")) | |
490 | destmarks = self.repo._bookmarks |
|
489 | destmarks = self.repo._bookmarks | |
491 | changes = [ |
|
490 | changes = [ | |
492 | (bookmark, bin(updatedbookmark[bookmark])) |
|
491 | (bookmark, bin(updatedbookmark[bookmark])) | |
493 | for bookmark in updatedbookmark |
|
492 | for bookmark in updatedbookmark | |
494 | ] |
|
493 | ] | |
495 | destmarks.applychanges(self.repo, tr, changes) |
|
494 | destmarks.applychanges(self.repo, tr, changes) | |
496 | tr.close() |
|
495 | tr.close() | |
497 | finally: |
|
496 | finally: | |
498 | lockmod.release(lock, wlock, tr) |
|
497 | lockmod.release(lock, wlock, tr) | |
499 |
|
498 | |||
500 | def hascommitfrommap(self, rev): |
|
499 | def hascommitfrommap(self, rev): | |
501 | # the exact semantics of clonebranches is unclear so we can't say no |
|
500 | # the exact semantics of clonebranches is unclear so we can't say no | |
502 | return rev in self.repo or self.clonebranches |
|
501 | return rev in self.repo or self.clonebranches | |
503 |
|
502 | |||
504 | def hascommitforsplicemap(self, rev): |
|
503 | def hascommitforsplicemap(self, rev): | |
505 | if rev not in self.repo and self.clonebranches: |
|
504 | if rev not in self.repo and self.clonebranches: | |
506 | raise error.Abort( |
|
505 | raise error.Abort( | |
507 | _( |
|
506 | _( | |
508 | b'revision %s not found in destination ' |
|
507 | b'revision %s not found in destination ' | |
509 | b'repository (lookups with clonebranches=true ' |
|
508 | b'repository (lookups with clonebranches=true ' | |
510 | b'are not implemented)' |
|
509 | b'are not implemented)' | |
511 | ) |
|
510 | ) | |
512 | % rev |
|
511 | % rev | |
513 | ) |
|
512 | ) | |
514 | return rev in self.repo |
|
513 | return rev in self.repo | |
515 |
|
514 | |||
516 |
|
515 | |||
517 | class mercurial_source(common.converter_source): |
|
516 | class mercurial_source(common.converter_source): | |
518 | def __init__(self, ui, repotype, path, revs=None): |
|
517 | def __init__(self, ui, repotype, path, revs=None): | |
519 | common.converter_source.__init__(self, ui, repotype, path, revs) |
|
518 | common.converter_source.__init__(self, ui, repotype, path, revs) | |
520 | self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors') |
|
519 | self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors') | |
521 | self.ignored = set() |
|
520 | self.ignored = set() | |
522 | self.saverev = ui.configbool(b'convert', b'hg.saverev') |
|
521 | self.saverev = ui.configbool(b'convert', b'hg.saverev') | |
523 | try: |
|
522 | try: | |
524 | self.repo = hg.repository(self.ui, path) |
|
523 | self.repo = hg.repository(self.ui, path) | |
525 | # try to provoke an exception if this isn't really a hg |
|
524 | # try to provoke an exception if this isn't really a hg | |
526 | # repo, but some other bogus compatible-looking url |
|
525 | # repo, but some other bogus compatible-looking url | |
527 | if not self.repo.local(): |
|
526 | if not self.repo.local(): | |
528 | raise error.RepoError |
|
527 | raise error.RepoError | |
529 | except error.RepoError: |
|
528 | except error.RepoError: | |
530 | ui.traceback() |
|
529 | ui.traceback() | |
531 | raise NoRepo(_(b"%s is not a local Mercurial repository") % path) |
|
530 | raise NoRepo(_(b"%s is not a local Mercurial repository") % path) | |
532 | self.lastrev = None |
|
531 | self.lastrev = None | |
533 | self.lastctx = None |
|
532 | self.lastctx = None | |
534 | self._changescache = None, None |
|
533 | self._changescache = None, None | |
535 | self.convertfp = None |
|
534 | self.convertfp = None | |
536 | # Restrict converted revisions to startrev descendants |
|
535 | # Restrict converted revisions to startrev descendants | |
537 | startnode = ui.config(b'convert', b'hg.startrev') |
|
536 | startnode = ui.config(b'convert', b'hg.startrev') | |
538 | hgrevs = ui.config(b'convert', b'hg.revs') |
|
537 | hgrevs = ui.config(b'convert', b'hg.revs') | |
539 | if hgrevs is None: |
|
538 | if hgrevs is None: | |
540 | if startnode is not None: |
|
539 | if startnode is not None: | |
541 | try: |
|
540 | try: | |
542 | startnode = self.repo.lookup(startnode) |
|
541 | startnode = self.repo.lookup(startnode) | |
543 | except error.RepoError: |
|
542 | except error.RepoError: | |
544 | raise error.Abort( |
|
543 | raise error.Abort( | |
545 | _(b'%s is not a valid start revision') % startnode |
|
544 | _(b'%s is not a valid start revision') % startnode | |
546 | ) |
|
545 | ) | |
547 | startrev = self.repo.changelog.rev(startnode) |
|
546 | startrev = self.repo.changelog.rev(startnode) | |
548 | children = {startnode: 1} |
|
547 | children = {startnode: 1} | |
549 | for r in self.repo.changelog.descendants([startrev]): |
|
548 | for r in self.repo.changelog.descendants([startrev]): | |
550 | children[self.repo.changelog.node(r)] = 1 |
|
549 | children[self.repo.changelog.node(r)] = 1 | |
551 | self.keep = children.__contains__ |
|
550 | self.keep = children.__contains__ | |
552 | else: |
|
551 | else: | |
553 | self.keep = util.always |
|
552 | self.keep = util.always | |
554 | if revs: |
|
553 | if revs: | |
555 | self._heads = [self.repo.lookup(r) for r in revs] |
|
554 | self._heads = [self.repo.lookup(r) for r in revs] | |
556 | else: |
|
555 | else: | |
557 | self._heads = self.repo.heads() |
|
556 | self._heads = self.repo.heads() | |
558 | else: |
|
557 | else: | |
559 | if revs or startnode is not None: |
|
558 | if revs or startnode is not None: | |
560 | raise error.Abort( |
|
559 | raise error.Abort( | |
561 | _( |
|
560 | _( | |
562 | b'hg.revs cannot be combined with ' |
|
561 | b'hg.revs cannot be combined with ' | |
563 | b'hg.startrev or --rev' |
|
562 | b'hg.startrev or --rev' | |
564 | ) |
|
563 | ) | |
565 | ) |
|
564 | ) | |
566 | nodes = set() |
|
565 | nodes = set() | |
567 | parents = set() |
|
566 | parents = set() | |
568 | for r in scmutil.revrange(self.repo, [hgrevs]): |
|
567 | for r in scmutil.revrange(self.repo, [hgrevs]): | |
569 | ctx = self.repo[r] |
|
568 | ctx = self.repo[r] | |
570 | nodes.add(ctx.node()) |
|
569 | nodes.add(ctx.node()) | |
571 | parents.update(p.node() for p in ctx.parents()) |
|
570 | parents.update(p.node() for p in ctx.parents()) | |
572 | self.keep = nodes.__contains__ |
|
571 | self.keep = nodes.__contains__ | |
573 | self._heads = nodes - parents |
|
572 | self._heads = nodes - parents | |
574 |
|
573 | |||
575 | def _changectx(self, rev): |
|
574 | def _changectx(self, rev): | |
576 | if self.lastrev != rev: |
|
575 | if self.lastrev != rev: | |
577 | self.lastctx = self.repo[rev] |
|
576 | self.lastctx = self.repo[rev] | |
578 | self.lastrev = rev |
|
577 | self.lastrev = rev | |
579 | return self.lastctx |
|
578 | return self.lastctx | |
580 |
|
579 | |||
581 | def _parents(self, ctx): |
|
580 | def _parents(self, ctx): | |
582 | return [p for p in ctx.parents() if p and self.keep(p.node())] |
|
581 | return [p for p in ctx.parents() if p and self.keep(p.node())] | |
583 |
|
582 | |||
584 | def getheads(self): |
|
583 | def getheads(self): | |
585 | return [hex(h) for h in self._heads if self.keep(h)] |
|
584 | return [hex(h) for h in self._heads if self.keep(h)] | |
586 |
|
585 | |||
587 | def getfile(self, name, rev): |
|
586 | def getfile(self, name, rev): | |
588 | try: |
|
587 | try: | |
589 | fctx = self._changectx(rev)[name] |
|
588 | fctx = self._changectx(rev)[name] | |
590 | return fctx.data(), fctx.flags() |
|
589 | return fctx.data(), fctx.flags() | |
591 | except error.LookupError: |
|
590 | except error.LookupError: | |
592 | return None, None |
|
591 | return None, None | |
593 |
|
592 | |||
594 | def _changedfiles(self, ctx1, ctx2): |
|
593 | def _changedfiles(self, ctx1, ctx2): | |
595 | ma, r = [], [] |
|
594 | ma, r = [], [] | |
596 | maappend = ma.append |
|
595 | maappend = ma.append | |
597 | rappend = r.append |
|
596 | rappend = r.append | |
598 | d = ctx1.manifest().diff(ctx2.manifest()) |
|
597 | d = ctx1.manifest().diff(ctx2.manifest()) | |
599 | for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d): |
|
598 | for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d): | |
600 | if node2 is None: |
|
599 | if node2 is None: | |
601 | rappend(f) |
|
600 | rappend(f) | |
602 | else: |
|
601 | else: | |
603 | maappend(f) |
|
602 | maappend(f) | |
604 | return ma, r |
|
603 | return ma, r | |
605 |
|
604 | |||
606 | def getchanges(self, rev, full): |
|
605 | def getchanges(self, rev, full): | |
607 | ctx = self._changectx(rev) |
|
606 | ctx = self._changectx(rev) | |
608 | parents = self._parents(ctx) |
|
607 | parents = self._parents(ctx) | |
609 | if full or not parents: |
|
608 | if full or not parents: | |
610 | files = copyfiles = ctx.manifest() |
|
609 | files = copyfiles = ctx.manifest() | |
611 | if parents: |
|
610 | if parents: | |
612 | if self._changescache[0] == rev: |
|
611 | if self._changescache[0] == rev: | |
613 | ma, r = self._changescache[1] |
|
612 | ma, r = self._changescache[1] | |
614 | else: |
|
613 | else: | |
615 | ma, r = self._changedfiles(parents[0], ctx) |
|
614 | ma, r = self._changedfiles(parents[0], ctx) | |
616 | if not full: |
|
615 | if not full: | |
617 | files = ma + r |
|
616 | files = ma + r | |
618 | copyfiles = ma |
|
617 | copyfiles = ma | |
619 | # _getcopies() is also run for roots and before filtering so missing |
|
618 | # _getcopies() is also run for roots and before filtering so missing | |
620 | # revlogs are detected early |
|
619 | # revlogs are detected early | |
621 | copies = self._getcopies(ctx, parents, copyfiles) |
|
620 | copies = self._getcopies(ctx, parents, copyfiles) | |
622 | cleanp2 = set() |
|
621 | cleanp2 = set() | |
623 | if len(parents) == 2: |
|
622 | if len(parents) == 2: | |
624 | d = parents[1].manifest().diff(ctx.manifest(), clean=True) |
|
623 | d = parents[1].manifest().diff(ctx.manifest(), clean=True) | |
625 | for f, value in pycompat.iteritems(d): |
|
624 | for f, value in pycompat.iteritems(d): | |
626 | if value is None: |
|
625 | if value is None: | |
627 | cleanp2.add(f) |
|
626 | cleanp2.add(f) | |
628 | changes = [(f, rev) for f in files if f not in self.ignored] |
|
627 | changes = [(f, rev) for f in files if f not in self.ignored] | |
629 | changes.sort() |
|
628 | changes.sort() | |
630 | return changes, copies, cleanp2 |
|
629 | return changes, copies, cleanp2 | |
631 |
|
630 | |||
632 | def _getcopies(self, ctx, parents, files): |
|
631 | def _getcopies(self, ctx, parents, files): | |
633 | copies = {} |
|
632 | copies = {} | |
634 | for name in files: |
|
633 | for name in files: | |
635 | if name in self.ignored: |
|
634 | if name in self.ignored: | |
636 | continue |
|
635 | continue | |
637 | try: |
|
636 | try: | |
638 | copysource = ctx.filectx(name).copysource() |
|
637 | copysource = ctx.filectx(name).copysource() | |
639 | if copysource in self.ignored: |
|
638 | if copysource in self.ignored: | |
640 | continue |
|
639 | continue | |
641 | # Ignore copy sources not in parent revisions |
|
640 | # Ignore copy sources not in parent revisions | |
642 | if not any(copysource in p for p in parents): |
|
641 | if not any(copysource in p for p in parents): | |
643 | continue |
|
642 | continue | |
644 | copies[name] = copysource |
|
643 | copies[name] = copysource | |
645 | except TypeError: |
|
644 | except TypeError: | |
646 | pass |
|
645 | pass | |
647 | except error.LookupError as e: |
|
646 | except error.LookupError as e: | |
648 | if not self.ignoreerrors: |
|
647 | if not self.ignoreerrors: | |
649 | raise |
|
648 | raise | |
650 | self.ignored.add(name) |
|
649 | self.ignored.add(name) | |
651 | self.ui.warn(_(b'ignoring: %s\n') % e) |
|
650 | self.ui.warn(_(b'ignoring: %s\n') % e) | |
652 | return copies |
|
651 | return copies | |
653 |
|
652 | |||
654 | def getcommit(self, rev): |
|
653 | def getcommit(self, rev): | |
655 | ctx = self._changectx(rev) |
|
654 | ctx = self._changectx(rev) | |
656 | _parents = self._parents(ctx) |
|
655 | _parents = self._parents(ctx) | |
657 | parents = [p.hex() for p in _parents] |
|
656 | parents = [p.hex() for p in _parents] | |
658 | optparents = [p.hex() for p in ctx.parents() if p and p not in _parents] |
|
657 | optparents = [p.hex() for p in ctx.parents() if p and p not in _parents] | |
659 | crev = rev |
|
658 | crev = rev | |
660 |
|
659 | |||
661 | return common.commit( |
|
660 | return common.commit( | |
662 | author=ctx.user(), |
|
661 | author=ctx.user(), | |
663 | date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'), |
|
662 | date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'), | |
664 | desc=ctx.description(), |
|
663 | desc=ctx.description(), | |
665 | rev=crev, |
|
664 | rev=crev, | |
666 | parents=parents, |
|
665 | parents=parents, | |
667 | optparents=optparents, |
|
666 | optparents=optparents, | |
668 | branch=ctx.branch(), |
|
667 | branch=ctx.branch(), | |
669 | extra=ctx.extra(), |
|
668 | extra=ctx.extra(), | |
670 | sortkey=ctx.rev(), |
|
669 | sortkey=ctx.rev(), | |
671 | saverev=self.saverev, |
|
670 | saverev=self.saverev, | |
672 | phase=ctx.phase(), |
|
671 | phase=ctx.phase(), | |
673 | ctx=ctx, |
|
672 | ctx=ctx, | |
674 | ) |
|
673 | ) | |
675 |
|
674 | |||
676 | def numcommits(self): |
|
675 | def numcommits(self): | |
677 | return len(self.repo) |
|
676 | return len(self.repo) | |
678 |
|
677 | |||
679 | def gettags(self): |
|
678 | def gettags(self): | |
680 | # This will get written to .hgtags, filter non global tags out. |
|
679 | # This will get written to .hgtags, filter non global tags out. | |
681 | tags = [ |
|
680 | tags = [ | |
682 | t |
|
681 | t | |
683 | for t in self.repo.tagslist() |
|
682 | for t in self.repo.tagslist() | |
684 | if self.repo.tagtype(t[0]) == b'global' |
|
683 | if self.repo.tagtype(t[0]) == b'global' | |
685 | ] |
|
684 | ] | |
686 | return {name: hex(node) for name, node in tags if self.keep(node)} |
|
685 | return {name: hex(node) for name, node in tags if self.keep(node)} | |
687 |
|
686 | |||
688 | def getchangedfiles(self, rev, i): |
|
687 | def getchangedfiles(self, rev, i): | |
689 | ctx = self._changectx(rev) |
|
688 | ctx = self._changectx(rev) | |
690 | parents = self._parents(ctx) |
|
689 | parents = self._parents(ctx) | |
691 | if not parents and i is None: |
|
690 | if not parents and i is None: | |
692 | i = 0 |
|
691 | i = 0 | |
693 | ma, r = ctx.manifest().keys(), [] |
|
692 | ma, r = ctx.manifest().keys(), [] | |
694 | else: |
|
693 | else: | |
695 | i = i or 0 |
|
694 | i = i or 0 | |
696 | ma, r = self._changedfiles(parents[i], ctx) |
|
695 | ma, r = self._changedfiles(parents[i], ctx) | |
697 | ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)] |
|
696 | ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)] | |
698 |
|
697 | |||
699 | if i == 0: |
|
698 | if i == 0: | |
700 | self._changescache = (rev, (ma, r)) |
|
699 | self._changescache = (rev, (ma, r)) | |
701 |
|
700 | |||
702 | return ma + r |
|
701 | return ma + r | |
703 |
|
702 | |||
704 | def converted(self, rev, destrev): |
|
703 | def converted(self, rev, destrev): | |
705 | if self.convertfp is None: |
|
704 | if self.convertfp is None: | |
706 | self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab') |
|
705 | self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab') | |
707 | self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev))) |
|
706 | self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev))) | |
708 | self.convertfp.flush() |
|
707 | self.convertfp.flush() | |
709 |
|
708 | |||
710 | def before(self): |
|
709 | def before(self): | |
711 | self.ui.debug(b'run hg source pre-conversion action\n') |
|
710 | self.ui.debug(b'run hg source pre-conversion action\n') | |
712 |
|
711 | |||
713 | def after(self): |
|
712 | def after(self): | |
714 | self.ui.debug(b'run hg source post-conversion action\n') |
|
713 | self.ui.debug(b'run hg source post-conversion action\n') | |
715 |
|
714 | |||
716 | def hasnativeorder(self): |
|
715 | def hasnativeorder(self): | |
717 | return True |
|
716 | return True | |
718 |
|
717 | |||
719 | def hasnativeclose(self): |
|
718 | def hasnativeclose(self): | |
720 | return True |
|
719 | return True | |
721 |
|
720 | |||
722 | def lookuprev(self, rev): |
|
721 | def lookuprev(self, rev): | |
723 | try: |
|
722 | try: | |
724 | return hex(self.repo.lookup(rev)) |
|
723 | return hex(self.repo.lookup(rev)) | |
725 | except (error.RepoError, error.LookupError): |
|
724 | except (error.RepoError, error.LookupError): | |
726 | return None |
|
725 | return None | |
727 |
|
726 | |||
728 | def getbookmarks(self): |
|
727 | def getbookmarks(self): | |
729 | return bookmarks.listbookmarks(self.repo) |
|
728 | return bookmarks.listbookmarks(self.repo) | |
730 |
|
729 | |||
731 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
730 | def checkrevformat(self, revstr, mapname=b'splicemap'): | |
732 | """ Mercurial, revision string is a 40 byte hex """ |
|
731 | """ Mercurial, revision string is a 40 byte hex """ | |
733 | self.checkhexformat(revstr, mapname) |
|
732 | self.checkhexformat(revstr, mapname) |
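checkrevformat() above delegates to checkhexformat(), the validator the
converter base class in convert/common.py applies to splicemap entries.
A minimal standalone sketch of that check; the real helper raises
error.Abort with a localized message, so the ValueError here is only
illustrative::

    import re

    def checkhexformat(revstr, mapname=b'splicemap'):
        # A Mercurial revision identifier is exactly 40 hex digits.
        if not re.match(br'[0-9a-fA-F]{40}$', revstr):
            raise ValueError(
                '%s entry %s is not a valid revision identifier'
                % (mapname.decode(), revstr.decode('ascii', 'replace'))
            )

    checkhexformat(b'a' * 40)    # accepted
    # checkhexformat(b'tip')     # would raise ValueError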
@@ -1,341 +1,343 b'' | |||||
1 | from __future__ import absolute_import |
|
1 | from __future__ import absolute_import | |
2 |
|
2 | |||
3 | import contextlib |
|
3 | import contextlib | |
4 | import errno |
|
4 | import errno | |
5 | import os |
|
5 | import os | |
6 |
|
6 | |||
7 | from mercurial.node import nullid |
|
7 | from mercurial.node import sha1nodeconstants | |
8 | from mercurial import ( |
|
8 | from mercurial import ( | |
9 | error, |
|
9 | error, | |
10 | extensions, |
|
10 | extensions, | |
11 | match as matchmod, |
|
11 | match as matchmod, | |
12 | pycompat, |
|
12 | pycompat, | |
13 | scmutil, |
|
13 | scmutil, | |
14 | util, |
|
14 | util, | |
15 | ) |
|
15 | ) | |
16 | from mercurial.interfaces import ( |
|
16 | from mercurial.interfaces import ( | |
17 | dirstate as intdirstate, |
|
17 | dirstate as intdirstate, | |
18 | util as interfaceutil, |
|
18 | util as interfaceutil, | |
19 | ) |
|
19 | ) | |
20 |
|
20 | |||
21 | from . import gitutil |
|
21 | from . import gitutil | |
22 |
|
22 | |||
23 | pygit2 = gitutil.get_pygit2() |
|
23 | pygit2 = gitutil.get_pygit2() | |
24 |
|
24 | |||
25 |
|
25 | |||
26 | def readpatternfile(orig, filepath, warn, sourceinfo=False): |
|
26 | def readpatternfile(orig, filepath, warn, sourceinfo=False): | |
27 | if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')): |
|
27 | if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')): | |
28 | return orig(filepath, warn, sourceinfo=False) |
|
28 | return orig(filepath, warn, sourceinfo=False) | |
29 | result = [] |
|
29 | result = [] | |
30 | warnings = [] |
|
30 | warnings = [] | |
31 | with open(filepath, b'rb') as fp: |
|
31 | with open(filepath, b'rb') as fp: | |
32 | for l in fp: |
|
32 | for l in fp: | |
33 | l = l.strip() |
|
33 | l = l.strip() | |
34 | if not l or l.startswith(b'#'): |
|
34 | if not l or l.startswith(b'#'): | |
35 | continue |
|
35 | continue | |
36 | if l.startswith(b'!'): |
|
36 | if l.startswith(b'!'): | |
37 | warnings.append(b'unsupported ignore pattern %s' % l) |
|
37 | warnings.append(b'unsupported ignore pattern %s' % l) | |
38 | continue |
|
38 | continue | |
39 | if l.startswith(b'/'): |
|
39 | if l.startswith(b'/'): | |
40 | result.append(b'rootglob:' + l[1:]) |
|
40 | result.append(b'rootglob:' + l[1:]) | |
41 | else: |
|
41 | else: | |
42 | result.append(b'relglob:' + l) |
|
42 | result.append(b'relglob:' + l) | |
43 | return result, warnings |
|
43 | return result, warnings | |
44 |
|
44 | |||
45 |
|
45 | |||
46 | extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile) |
|
46 | extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile) | |
47 |
|
47 | |||
48 |
|
48 | |||
49 | _STATUS_MAP = {} |
|
49 | _STATUS_MAP = {} | |
50 | if pygit2: |
|
50 | if pygit2: | |
51 | _STATUS_MAP = { |
|
51 | _STATUS_MAP = { | |
52 | pygit2.GIT_STATUS_CONFLICTED: b'm', |
|
52 | pygit2.GIT_STATUS_CONFLICTED: b'm', | |
53 | pygit2.GIT_STATUS_CURRENT: b'n', |
|
53 | pygit2.GIT_STATUS_CURRENT: b'n', | |
54 | pygit2.GIT_STATUS_IGNORED: b'?', |
|
54 | pygit2.GIT_STATUS_IGNORED: b'?', | |
55 | pygit2.GIT_STATUS_INDEX_DELETED: b'r', |
|
55 | pygit2.GIT_STATUS_INDEX_DELETED: b'r', | |
56 | pygit2.GIT_STATUS_INDEX_MODIFIED: b'n', |
|
56 | pygit2.GIT_STATUS_INDEX_MODIFIED: b'n', | |
57 | pygit2.GIT_STATUS_INDEX_NEW: b'a', |
|
57 | pygit2.GIT_STATUS_INDEX_NEW: b'a', | |
58 | pygit2.GIT_STATUS_INDEX_RENAMED: b'a', |
|
58 | pygit2.GIT_STATUS_INDEX_RENAMED: b'a', | |
59 | pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n', |
|
59 | pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n', | |
60 | pygit2.GIT_STATUS_WT_DELETED: b'r', |
|
60 | pygit2.GIT_STATUS_WT_DELETED: b'r', | |
61 | pygit2.GIT_STATUS_WT_MODIFIED: b'n', |
|
61 | pygit2.GIT_STATUS_WT_MODIFIED: b'n', | |
62 | pygit2.GIT_STATUS_WT_NEW: b'?', |
|
62 | pygit2.GIT_STATUS_WT_NEW: b'?', | |
63 | pygit2.GIT_STATUS_WT_RENAMED: b'a', |
|
63 | pygit2.GIT_STATUS_WT_RENAMED: b'a', | |
64 | pygit2.GIT_STATUS_WT_TYPECHANGE: b'n', |
|
64 | pygit2.GIT_STATUS_WT_TYPECHANGE: b'n', | |
65 | pygit2.GIT_STATUS_WT_UNREADABLE: b'?', |
|
65 | pygit2.GIT_STATUS_WT_UNREADABLE: b'?', | |
66 | pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: 'm', |
|
66 | pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: 'm', | |
67 | } |
|
67 | } | |
68 |
|
68 | |||
69 |
|
69 | |||
70 | @interfaceutil.implementer(intdirstate.idirstate) |
|
70 | @interfaceutil.implementer(intdirstate.idirstate) | |
71 | class gitdirstate(object): |
|
71 | class gitdirstate(object): | |
72 | def __init__(self, ui, root, gitrepo): |
|
72 | def __init__(self, ui, root, gitrepo): | |
73 | self._ui = ui |
|
73 | self._ui = ui | |
74 | self._root = os.path.dirname(root) |
|
74 | self._root = os.path.dirname(root) | |
75 | self.git = gitrepo |
|
75 | self.git = gitrepo | |
76 | self._plchangecallbacks = {} |
|
76 | self._plchangecallbacks = {} | |
77 |
|
77 | |||
78 | def p1(self): |
|
78 | def p1(self): | |
79 | try: |
|
79 | try: | |
80 | return self.git.head.peel().id.raw |
|
80 | return self.git.head.peel().id.raw | |
81 | except pygit2.GitError: |
|
81 | except pygit2.GitError: | |
82 | # Typically happens when peeling HEAD fails, as in an |
|
82 | # Typically happens when peeling HEAD fails, as in an | |
83 | # empty repository. |
|
83 | # empty repository. | |
84 | return nullid |
|
84 | return sha1nodeconstants.nullid | |
85 |
|
85 | |||
86 | def p2(self): |
|
86 | def p2(self): | |
87 | # TODO: MERGE_HEAD? something like that, right? |
|
87 | # TODO: MERGE_HEAD? something like that, right? | |
88 | return nullid |
|
88 | return sha1nodeconstants.nullid | |
89 |
|
89 | |||
90 | def setparents(self, p1, p2=nullid): |
|
90 | def setparents(self, p1, p2=None): | |
91 | assert p2 == nullid, b'TODO merging support' |
|
91 | if p2 is None: | |
|
92 | p2 = sha1nodeconstants.nullid | |||
|
93 | assert p2 == sha1nodeconstants.nullid, b'TODO merging support' | |||
92 | self.git.head.set_target(gitutil.togitnode(p1)) |
|
94 | self.git.head.set_target(gitutil.togitnode(p1)) | |
93 |
|
95 | |||
94 | @util.propertycache |
|
96 | @util.propertycache | |
95 | def identity(self): |
|
97 | def identity(self): | |
96 | return util.filestat.frompath( |
|
98 | return util.filestat.frompath( | |
97 | os.path.join(self._root, b'.git', b'index') |
|
99 | os.path.join(self._root, b'.git', b'index') | |
98 | ) |
|
100 | ) | |
99 |
|
101 | |||
100 | def branch(self): |
|
102 | def branch(self): | |
101 | return b'default' |
|
103 | return b'default' | |
102 |
|
104 | |||
103 | def parents(self): |
|
105 | def parents(self): | |
104 | # TODO how on earth do we find p2 if a merge is in flight? |
|
106 | # TODO how on earth do we find p2 if a merge is in flight? | |
105 | return self.p1(), nullid |
|
107 | return self.p1(), sha1nodeconstants.nullid | |
106 |
|
108 | |||
107 | def __iter__(self): |
|
109 | def __iter__(self): | |
108 | return (pycompat.fsencode(f.path) for f in self.git.index) |
|
110 | return (pycompat.fsencode(f.path) for f in self.git.index) | |
109 |
|
111 | |||
110 | def items(self): |
|
112 | def items(self): | |
111 | for ie in self.git.index: |
|
113 | for ie in self.git.index: | |
112 | yield ie.path, None # value should be a dirstatetuple |
|
114 | yield ie.path, None # value should be a dirstatetuple | |
113 |
|
115 | |||
114 | # py2,3 compat forward |
|
116 | # py2,3 compat forward | |
115 | iteritems = items |
|
117 | iteritems = items | |
116 |
|
118 | |||
117 | def __getitem__(self, filename): |
|
119 | def __getitem__(self, filename): | |
118 | try: |
|
120 | try: | |
119 | gs = self.git.status_file(filename) |
|
121 | gs = self.git.status_file(filename) | |
120 | except KeyError: |
|
122 | except KeyError: | |
121 | return b'?' |
|
123 | return b'?' | |
122 | return _STATUS_MAP[gs] |
|
124 | return _STATUS_MAP[gs] | |
123 |
|
125 | |||
124 | def __contains__(self, filename): |
|
126 | def __contains__(self, filename): | |
125 | try: |
|
127 | try: | |
126 | gs = self.git.status_file(filename) |
|
128 | gs = self.git.status_file(filename) | |
127 | return _STATUS_MAP[gs] != b'?' |
|
129 | return _STATUS_MAP[gs] != b'?' | |
128 | except KeyError: |
|
130 | except KeyError: | |
129 | return False |
|
131 | return False | |
130 |
|
132 | |||
131 | def status(self, match, subrepos, ignored, clean, unknown): |
|
133 | def status(self, match, subrepos, ignored, clean, unknown): | |
132 | listclean = clean |
|
134 | listclean = clean | |
133 | # TODO handling of clean files - can we get that from git.status()? |
|
135 | # TODO handling of clean files - can we get that from git.status()? | |
134 | modified, added, removed, deleted, unknown, ignored, clean = ( |
|
136 | modified, added, removed, deleted, unknown, ignored, clean = ( | |
135 | [], |
|
137 | [], | |
136 | [], |
|
138 | [], | |
137 | [], |
|
139 | [], | |
138 | [], |
|
140 | [], | |
139 | [], |
|
141 | [], | |
140 | [], |
|
142 | [], | |
141 | [], |
|
143 | [], | |
142 | ) |
|
144 | ) | |
143 | gstatus = self.git.status() |
|
145 | gstatus = self.git.status() | |
144 | for path, status in gstatus.items(): |
|
146 | for path, status in gstatus.items(): | |
145 | path = pycompat.fsencode(path) |
|
147 | path = pycompat.fsencode(path) | |
146 | if not match(path): |
|
148 | if not match(path): | |
147 | continue |
|
149 | continue | |
148 | if status == pygit2.GIT_STATUS_IGNORED: |
|
150 | if status == pygit2.GIT_STATUS_IGNORED: | |
149 | if path.endswith(b'/'): |
|
151 | if path.endswith(b'/'): | |
150 | continue |
|
152 | continue | |
151 | ignored.append(path) |
|
153 | ignored.append(path) | |
152 | elif status in ( |
|
154 | elif status in ( | |
153 | pygit2.GIT_STATUS_WT_MODIFIED, |
|
155 | pygit2.GIT_STATUS_WT_MODIFIED, | |
154 | pygit2.GIT_STATUS_INDEX_MODIFIED, |
|
156 | pygit2.GIT_STATUS_INDEX_MODIFIED, | |
155 | pygit2.GIT_STATUS_WT_MODIFIED |
|
157 | pygit2.GIT_STATUS_WT_MODIFIED | |
156 | | pygit2.GIT_STATUS_INDEX_MODIFIED, |
|
158 | | pygit2.GIT_STATUS_INDEX_MODIFIED, | |
157 | ): |
|
159 | ): | |
158 | modified.append(path) |
|
160 | modified.append(path) | |
159 | elif status == pygit2.GIT_STATUS_INDEX_NEW: |
|
161 | elif status == pygit2.GIT_STATUS_INDEX_NEW: | |
160 | added.append(path) |
|
162 | added.append(path) | |
161 | elif status == pygit2.GIT_STATUS_WT_NEW: |
|
163 | elif status == pygit2.GIT_STATUS_WT_NEW: | |
162 | unknown.append(path) |
|
164 | unknown.append(path) | |
163 | elif status == pygit2.GIT_STATUS_WT_DELETED: |
|
165 | elif status == pygit2.GIT_STATUS_WT_DELETED: | |
164 | deleted.append(path) |
|
166 | deleted.append(path) | |
165 | elif status == pygit2.GIT_STATUS_INDEX_DELETED: |
|
167 | elif status == pygit2.GIT_STATUS_INDEX_DELETED: | |
166 | removed.append(path) |
|
168 | removed.append(path) | |
167 | else: |
|
169 | else: | |
168 | raise error.Abort( |
|
170 | raise error.Abort( | |
169 | b'unhandled case: status for %r is %r' % (path, status) |
|
171 | b'unhandled case: status for %r is %r' % (path, status) | |
170 | ) |
|
172 | ) | |
171 |
|
173 | |||
172 | if listclean: |
|
174 | if listclean: | |
173 | observed = set( |
|
175 | observed = set( | |
174 | modified + added + removed + deleted + unknown + ignored |
|
176 | modified + added + removed + deleted + unknown + ignored | |
175 | ) |
|
177 | ) | |
176 | index = self.git.index |
|
178 | index = self.git.index | |
177 | index.read() |
|
179 | index.read() | |
178 | for entry in index: |
|
180 | for entry in index: | |
179 | path = pycompat.fsencode(entry.path) |
|
181 | path = pycompat.fsencode(entry.path) | |
180 | if not match(path): |
|
182 | if not match(path): | |
181 | continue |
|
183 | continue | |
182 | if path in observed: |
|
184 | if path in observed: | |
183 | continue # already in some other set |
|
185 | continue # already in some other set | |
184 | if path[-1] == b'/': |
|
186 | if path[-1] == b'/': | |
185 | continue # directory |
|
187 | continue # directory | |
186 | clean.append(path) |
|
188 | clean.append(path) | |
187 |
|
189 | |||
188 | # TODO are we really always sure of status here? |
|
190 | # TODO are we really always sure of status here? | |
189 | return ( |
|
191 | return ( | |
190 | False, |
|
192 | False, | |
191 | scmutil.status( |
|
193 | scmutil.status( | |
192 | modified, added, removed, deleted, unknown, ignored, clean |
|
194 | modified, added, removed, deleted, unknown, ignored, clean | |
193 | ), |
|
195 | ), | |
194 | ) |
|
196 | ) | |
195 |
|
197 | |||
196 | def flagfunc(self, buildfallback): |
|
198 | def flagfunc(self, buildfallback): | |
197 | # TODO we can do better |
|
199 | # TODO we can do better | |
198 | return buildfallback() |
|
200 | return buildfallback() | |
199 |
|
201 | |||
200 | def getcwd(self): |
|
202 | def getcwd(self): | |
201 | # TODO is this a good way to do this? |
|
203 | # TODO is this a good way to do this? | |
202 | return os.path.dirname( |
|
204 | return os.path.dirname( | |
203 | os.path.dirname(pycompat.fsencode(self.git.path)) |
|
205 | os.path.dirname(pycompat.fsencode(self.git.path)) | |
204 | ) |
|
206 | ) | |
205 |
|
207 | |||
206 | def normalize(self, path): |
|
208 | def normalize(self, path): | |
207 | normed = util.normcase(path) |
|
209 | normed = util.normcase(path) | |
208 | assert normed == path, b"TODO handling of case folding: %s != %s" % ( |
|
210 | assert normed == path, b"TODO handling of case folding: %s != %s" % ( | |
209 | normed, |
|
211 | normed, | |
210 | path, |
|
212 | path, | |
211 | ) |
|
213 | ) | |
212 | return path |
|
214 | return path | |
213 |
|
215 | |||
214 | @property |
|
216 | @property | |
215 | def _checklink(self): |
|
217 | def _checklink(self): | |
216 | return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path))) |
|
218 | return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path))) | |
217 |
|
219 | |||
218 | def copies(self): |
|
220 | def copies(self): | |
219 | # TODO support copies? |
|
221 | # TODO support copies? | |
220 | return {} |
|
222 | return {} | |
221 |
|
223 | |||
222 | # # TODO what the heck is this |
|
224 | # # TODO what the heck is this | |
223 | _filecache = set() |
|
225 | _filecache = set() | |
224 |
|
226 | |||
225 | def pendingparentchange(self): |
|
227 | def pendingparentchange(self): | |
226 | # TODO: we need to implement the context manager bits and |
|
228 | # TODO: we need to implement the context manager bits and | |
227 | # correctly stage/revert index edits. |
|
229 | # correctly stage/revert index edits. | |
228 | return False |
|
230 | return False | |
229 |
|
231 | |||
230 | def write(self, tr): |
|
232 | def write(self, tr): | |
231 | # TODO: call parent change callbacks |
|
233 | # TODO: call parent change callbacks | |
232 |
|
234 | |||
233 | if tr: |
|
235 | if tr: | |
234 |
|
236 | |||
235 | def writeinner(category): |
|
237 | def writeinner(category): | |
236 | self.git.index.write() |
|
238 | self.git.index.write() | |
237 |
|
239 | |||
238 | tr.addpending(b'gitdirstate', writeinner) |
|
240 | tr.addpending(b'gitdirstate', writeinner) | |
239 | else: |
|
241 | else: | |
240 | self.git.index.write() |
|
242 | self.git.index.write() | |
241 |
|
243 | |||
242 | def pathto(self, f, cwd=None): |
|
244 | def pathto(self, f, cwd=None): | |
243 | if cwd is None: |
|
245 | if cwd is None: | |
244 | cwd = self.getcwd() |
|
246 | cwd = self.getcwd() | |
245 | # TODO core dirstate does something about slashes here |
|
247 | # TODO core dirstate does something about slashes here | |
246 | assert isinstance(f, bytes) |
|
248 | assert isinstance(f, bytes) | |
247 | r = util.pathto(self._root, cwd, f) |
|
249 | r = util.pathto(self._root, cwd, f) | |
248 | return r |
|
250 | return r | |
249 |
|
251 | |||
250 | def matches(self, match): |
|
252 | def matches(self, match): | |
251 | for x in self.git.index: |
|
253 | for x in self.git.index: | |
252 | p = pycompat.fsencode(x.path) |
|
254 | p = pycompat.fsencode(x.path) | |
253 | if match(p): |
|
255 | if match(p): | |
254 | yield p |
|
256 | yield p | |
255 |
|
257 | |||
256 | def normal(self, f, parentfiledata=None): |
|
258 | def normal(self, f, parentfiledata=None): | |
257 | """Mark a file normal and clean.""" |
|
259 | """Mark a file normal and clean.""" | |
258 | # TODO: for now we just let libgit2 re-stat the file. We can |
|
260 | # TODO: for now we just let libgit2 re-stat the file. We can | |
259 | # clearly do better. |
|
261 | # clearly do better. | |
260 |
|
262 | |||
261 | def normallookup(self, f): |
|
263 | def normallookup(self, f): | |
262 | """Mark a file normal, but possibly dirty.""" |
|
264 | """Mark a file normal, but possibly dirty.""" | |
263 | # TODO: for now we just let libgit2 re-stat the file. We can |
|
265 | # TODO: for now we just let libgit2 re-stat the file. We can | |
264 | # clearly do better. |
|
266 | # clearly do better. | |
265 |
|
267 | |||
266 | def walk(self, match, subrepos, unknown, ignored, full=True): |
|
268 | def walk(self, match, subrepos, unknown, ignored, full=True): | |
267 | # TODO: we need to use .status() and not iterate the index, |
|
269 | # TODO: we need to use .status() and not iterate the index, | |
268 | # because the index doesn't force a re-walk and so `hg add` of |
|
270 | # because the index doesn't force a re-walk and so `hg add` of | |
269 | # a new file without an intervening call to status will |
|
271 | # a new file without an intervening call to status will | |
270 | # silently do nothing. |
|
272 | # silently do nothing. | |
271 | r = {} |
|
273 | r = {} | |
272 | cwd = self.getcwd() |
|
274 | cwd = self.getcwd() | |
273 | for path, status in self.git.status().items(): |
|
275 | for path, status in self.git.status().items(): | |
274 | if path.startswith('.hg/'): |
|
276 | if path.startswith('.hg/'): | |
275 | continue |
|
277 | continue | |
276 | path = pycompat.fsencode(path) |
|
278 | path = pycompat.fsencode(path) | |
277 | if not match(path): |
|
279 | if not match(path): | |
278 | continue |
|
280 | continue | |
279 | # TODO construct the stat info from the status object? |
|
281 | # TODO construct the stat info from the status object? | |
280 | try: |
|
282 | try: | |
281 | s = os.stat(os.path.join(cwd, path)) |
|
283 | s = os.stat(os.path.join(cwd, path)) | |
282 | except OSError as e: |
|
284 | except OSError as e: | |
283 | if e.errno != errno.ENOENT: |
|
285 | if e.errno != errno.ENOENT: | |
284 | raise |
|
286 | raise | |
285 | continue |
|
287 | continue | |
286 | r[path] = s |
|
288 | r[path] = s | |
287 | return r |
|
289 | return r | |
288 |
|
290 | |||
289 | def savebackup(self, tr, backupname): |
|
291 | def savebackup(self, tr, backupname): | |
290 | # TODO: figure out a strategy for saving index backups. |
|
292 | # TODO: figure out a strategy for saving index backups. | |
291 | pass |
|
293 | pass | |
292 |
|
294 | |||
293 | def restorebackup(self, tr, backupname): |
|
295 | def restorebackup(self, tr, backupname): | |
294 | # TODO: figure out a strategy for saving index backups. |
|
296 | # TODO: figure out a strategy for saving index backups. | |
295 | pass |
|
297 | pass | |
296 |
|
298 | |||
297 | def add(self, f): |
|
299 | def add(self, f): | |
298 | index = self.git.index |
|
300 | index = self.git.index | |
299 | index.read() |
|
301 | index.read() | |
300 | index.add(pycompat.fsdecode(f)) |
|
302 | index.add(pycompat.fsdecode(f)) | |
301 | index.write() |
|
303 | index.write() | |
302 |
|
304 | |||
303 | def drop(self, f): |
|
305 | def drop(self, f): | |
304 | index = self.git.index |
|
306 | index = self.git.index | |
305 | index.read() |
|
307 | index.read() | |
306 | fs = pycompat.fsdecode(f) |
|
308 | fs = pycompat.fsdecode(f) | |
307 | if fs in index: |
|
309 | if fs in index: | |
308 | index.remove(fs) |
|
310 | index.remove(fs) | |
309 | index.write() |
|
311 | index.write() | |
310 |
|
312 | |||
311 | def remove(self, f): |
|
313 | def remove(self, f): | |
312 | index = self.git.index |
|
314 | index = self.git.index | |
313 | index.read() |
|
315 | index.read() | |
314 | index.remove(pycompat.fsdecode(f)) |
|
316 | index.remove(pycompat.fsdecode(f)) | |
315 | index.write() |
|
317 | index.write() | |
316 |
|
318 | |||
317 | def copied(self, path): |
|
319 | def copied(self, path): | |
318 | # TODO: track copies? |
|
320 | # TODO: track copies? | |
319 | return None |
|
321 | return None | |
320 |
|
322 | |||
321 | def prefetch_parents(self): |
|
323 | def prefetch_parents(self): | |
322 | # TODO |
|
324 | # TODO | |
323 | pass |
|
325 | pass | |
324 |
|
326 | |||
325 | @contextlib.contextmanager |
|
327 | @contextlib.contextmanager | |
326 | def parentchange(self): |
|
328 | def parentchange(self): | |
327 | # TODO: track this maybe? |
|
329 | # TODO: track this maybe? | |
328 | yield |
|
330 | yield | |
329 |
|
331 | |||
330 | def addparentchangecallback(self, category, callback): |
|
332 | def addparentchangecallback(self, category, callback): | |
331 | # TODO: should this be added to the dirstate interface? |
|
333 | # TODO: should this be added to the dirstate interface? | |
332 | self._plchangecallbacks[category] = callback |
|
334 | self._plchangecallbacks[category] = callback | |
333 |
|
335 | |||
334 | def clearbackup(self, tr, backupname): |
|
336 | def clearbackup(self, tr, backupname): | |
335 | # TODO |
|
337 | # TODO | |
336 | pass |
|
338 | pass | |
337 |
|
339 | |||
338 | def setbranch(self, branch): |
|
340 | def setbranch(self, branch): | |
339 | raise error.Abort( |
|
341 | raise error.Abort( | |
340 | b'git repos do not support branches. try using bookmarks' |
|
342 | b'git repos do not support branches. try using bookmarks' | |
341 | ) |
|
343 | ) |
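One way to read the setparents() change in this hunk: rather than
baking a null-node value into the signature as a default argument, the
new code defaults p2 to None and normalizes it inside the body. A
standalone sketch of that pattern, assuming only that
mercurial.node.sha1nodeconstants is importable::

    from mercurial.node import sha1nodeconstants

    def setparents(p1, p2=None):
        # Treat a missing p2 as the null node, as the diff above does.
        if p2 is None:
            p2 = sha1nodeconstants.nullid
        assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
        return p1, p2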
@@ -1,543 +1,543 b'' | |||||
1 | from __future__ import absolute_import |
|
1 | from __future__ import absolute_import | |
2 |
|
2 | |||
3 | from mercurial.i18n import _ |
|
3 | from mercurial.i18n import _ | |
4 |
|
4 | |||
5 | from mercurial.node import ( |
|
5 | from mercurial.node import ( | |
6 | bin, |
|
6 | bin, | |
7 | hex, |
|
7 | hex, | |
8 | nullhex, |
|
|||
9 | nullid, |
|
|||
10 | nullrev, |
|
8 | nullrev, | |
11 | sha1nodeconstants, |
|
9 | sha1nodeconstants, | |
12 | wdirhex, |
|
|||
13 | ) |
|
10 | ) | |
14 | from mercurial import ( |
|
11 | from mercurial import ( | |
15 | ancestor, |
|
12 | ancestor, | |
16 | changelog as hgchangelog, |
|
13 | changelog as hgchangelog, | |
17 | dagop, |
|
14 | dagop, | |
18 | encoding, |
|
15 | encoding, | |
19 | error, |
|
16 | error, | |
20 | manifest, |
|
17 | manifest, | |
21 | pycompat, |
|
18 | pycompat, | |
22 | ) |
|
19 | ) | |
23 | from mercurial.interfaces import ( |
|
20 | from mercurial.interfaces import ( | |
24 | repository, |
|
21 | repository, | |
25 | util as interfaceutil, |
|
22 | util as interfaceutil, | |
26 | ) |
|
23 | ) | |
27 | from mercurial.utils import stringutil |
|
24 | from mercurial.utils import stringutil | |
28 | from . import ( |
|
25 | from . import ( | |
29 | gitutil, |
|
26 | gitutil, | |
30 | index, |
|
27 | index, | |
31 | manifest as gitmanifest, |
|
28 | manifest as gitmanifest, | |
32 | ) |
|
29 | ) | |
33 |
|
30 | |||
34 | pygit2 = gitutil.get_pygit2() |
|
31 | pygit2 = gitutil.get_pygit2() | |
35 |
|
32 | |||
36 |
|
33 | |||
37 | class baselog(object): # revlog.revlog): |
|
34 | class baselog(object): # revlog.revlog): | |
38 | """Common implementations between changelog and manifestlog.""" |
|
35 | """Common implementations between changelog and manifestlog.""" | |
39 |
|
36 | |||
40 | def __init__(self, gr, db): |
|
37 | def __init__(self, gr, db): | |
41 | self.gitrepo = gr |
|
38 | self.gitrepo = gr | |
42 | self._db = db |
|
39 | self._db = db | |
43 |
|
40 | |||
44 | def __len__(self): |
|
41 | def __len__(self): | |
45 | return int( |
|
42 | return int( | |
46 | self._db.execute('SELECT COUNT(*) FROM changelog').fetchone()[0] |
|
43 | self._db.execute('SELECT COUNT(*) FROM changelog').fetchone()[0] | |
47 | ) |
|
44 | ) | |
48 |
|
45 | |||
49 | def rev(self, n): |
|
46 | def rev(self, n): | |
50 | if n == nullid: |
|
47 | if n == sha1nodeconstants.nullid: | |
51 | return -1 |
|
48 | return -1 | |
52 | t = self._db.execute( |
|
49 | t = self._db.execute( | |
53 | 'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),) |
|
50 | 'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),) | |
54 | ).fetchone() |
|
51 | ).fetchone() | |
55 | if t is None: |
|
52 | if t is None: | |
56 | raise error.LookupError(n, b'00changelog.i', _(b'no node %d')) |
|
53 | raise error.LookupError(n, b'00changelog.i', _(b'no node %d')) | |
57 | return t[0] |
|
54 | return t[0] | |
58 |
|
55 | |||
59 | def node(self, r): |
|
56 | def node(self, r): | |
60 | if r == nullrev: |
|
57 | if r == nullrev: | |
61 | return nullid |
|
58 | return sha1nodeconstants.nullid | |
62 | t = self._db.execute( |
|
59 | t = self._db.execute( | |
63 | 'SELECT node FROM changelog WHERE rev = ?', (r,) |
|
60 | 'SELECT node FROM changelog WHERE rev = ?', (r,) | |
64 | ).fetchone() |
|
61 | ).fetchone() | |
65 | if t is None: |
|
62 | if t is None: | |
66 | raise error.LookupError(r, b'00changelog.i', _(b'no node')) |
|
63 | raise error.LookupError(r, b'00changelog.i', _(b'no node')) | |
67 | return bin(t[0]) |
|
64 | return bin(t[0]) | |
68 |
|
65 | |||
69 | def hasnode(self, n): |
|
66 | def hasnode(self, n): | |
70 | t = self._db.execute( |
|
67 | t = self._db.execute( | |
71 | 'SELECT node FROM changelog WHERE node = ?', (n,) |
|
68 | 'SELECT node FROM changelog WHERE node = ?', (n,) | |
72 | ).fetchone() |
|
69 | ).fetchone() | |
73 | return t is not None |
|
70 | return t is not None | |
74 |
|
71 | |||
75 |
|
72 | |||
76 | class baselogindex(object): |
|
73 | class baselogindex(object): | |
77 | def __init__(self, log): |
|
74 | def __init__(self, log): | |
78 | self._log = log |
|
75 | self._log = log | |
79 |
|
76 | |||
80 | def has_node(self, n): |
|
77 | def has_node(self, n): | |
81 | return self._log.rev(n) != -1 |
|
78 | return self._log.rev(n) != -1 | |
82 |
|
79 | |||
83 | def __len__(self): |
|
80 | def __len__(self): | |
84 | return len(self._log) |
|
81 | return len(self._log) | |
85 |
|
82 | |||
86 | def __getitem__(self, idx): |
|
83 | def __getitem__(self, idx): | |
87 | p1rev, p2rev = self._log.parentrevs(idx) |
|
84 | p1rev, p2rev = self._log.parentrevs(idx) | |
88 | # TODO: it's messy that the index leaks so far out of the |
|
85 | # TODO: it's messy that the index leaks so far out of the | |
89 | # storage layer that we have to implement things like reading |
|
86 | # storage layer that we have to implement things like reading | |
90 | # this raw tuple, which exposes revlog internals. |
|
87 | # this raw tuple, which exposes revlog internals. | |
91 | return ( |
|
88 | return ( | |
92 | # Pretend offset is just the index, since we don't really care. |
|
89 | # Pretend offset is just the index, since we don't really care. | |
93 | idx, |
|
90 | idx, | |
94 | # Same with lengths |
|
91 | # Same with lengths | |
95 | idx, # length |
|
92 | idx, # length | |
96 | idx, # rawsize |
|
93 | idx, # rawsize | |
97 | -1, # delta base |
|
94 | -1, # delta base | |
98 | idx, # linkrev TODO is this right? |
|
95 | idx, # linkrev TODO is this right? | |
99 | p1rev, |
|
96 | p1rev, | |
100 | p2rev, |
|
97 | p2rev, | |
101 | self._log.node(idx), |
|
98 | self._log.node(idx), | |
102 | ) |
|
99 | ) | |
103 |
|
100 | |||
104 |
|
101 | |||
105 | # TODO: an interface for the changelog type? |
|
102 | # TODO: an interface for the changelog type? | |
106 | class changelog(baselog): |
|
103 | class changelog(baselog): | |
107 | # TODO: this appears to be an enumerated type, and should probably |
|
104 | # TODO: this appears to be an enumerated type, and should probably | |
108 | # be part of the public changelog interface |
|
105 | # be part of the public changelog interface | |
109 | _copiesstorage = b'extra' |
|
106 | _copiesstorage = b'extra' | |
110 |
|
107 | |||
111 | def __contains__(self, rev): |
|
108 | def __contains__(self, rev): | |
112 | try: |
|
109 | try: | |
113 | self.node(rev) |
|
110 | self.node(rev) | |
114 | return True |
|
111 | return True | |
115 | except error.LookupError: |
|
112 | except error.LookupError: | |
116 | return False |
|
113 | return False | |
117 |
|
114 | |||
118 | def __iter__(self): |
|
115 | def __iter__(self): | |
119 | return iter(pycompat.xrange(len(self))) |
|
116 | return iter(pycompat.xrange(len(self))) | |
120 |
|
117 | |||
121 | @property |
|
118 | @property | |
122 | def filteredrevs(self): |
|
119 | def filteredrevs(self): | |
123 | # TODO: we should probably add a refs/hg/ namespace for hidden |
|
120 | # TODO: we should probably add a refs/hg/ namespace for hidden | |
124 | # heads etc, but that's an idea for later. |
|
121 | # heads etc, but that's an idea for later. | |
125 | return set() |
|
122 | return set() | |
126 |
|
123 | |||
127 | @property |
|
124 | @property | |
128 | def index(self): |
|
125 | def index(self): | |
129 | return baselogindex(self) |
|
126 | return baselogindex(self) | |
130 |
|
127 | |||
131 | @property |
|
128 | @property | |
132 | def nodemap(self): |
|
129 | def nodemap(self): | |
133 | r = { |
|
130 | r = { | |
134 | bin(v[0]): v[1] |
|
131 | bin(v[0]): v[1] | |
135 | for v in self._db.execute('SELECT node, rev FROM changelog') |
|
132 | for v in self._db.execute('SELECT node, rev FROM changelog') | |
136 | } |
|
133 | } | |
137 | r[nullid] = nullrev |
|
134 | r[sha1nodeconstants.nullid] = nullrev | |
138 | return r |
|
135 | return r | |
139 |
|
136 | |||
140 | def tip(self): |
|
137 | def tip(self): | |
141 | t = self._db.execute( |
|
138 | t = self._db.execute( | |
142 | 'SELECT node FROM changelog ORDER BY rev DESC LIMIT 1' |
|
139 | 'SELECT node FROM changelog ORDER BY rev DESC LIMIT 1' | |
143 | ).fetchone() |
|
140 | ).fetchone() | |
144 | if t: |
|
141 | if t: | |
145 | return bin(t[0]) |
|
142 | return bin(t[0]) | |
146 | return nullid |
|
143 | return sha1nodeconstants.nullid | |
147 |
|
144 | |||
148 | def revs(self, start=0, stop=None): |
|
145 | def revs(self, start=0, stop=None): | |
149 | if stop is None: |
|
146 | if stop is None: | |
150 | stop = self.tip() |
|
147 | stop = self.tip() | |
151 | t = self._db.execute( |
|
148 | t = self._db.execute( | |
152 | 'SELECT rev FROM changelog ' |
|
149 | 'SELECT rev FROM changelog ' | |
153 | 'WHERE rev >= ? AND rev <= ? ' |
|
150 | 'WHERE rev >= ? AND rev <= ? ' | |
154 | 'ORDER BY REV ASC', |
|
151 | 'ORDER BY REV ASC', | |
155 | (start, stop), |
|
152 | (start, stop), | |
156 | ) |
|
153 | ) | |
157 | return (int(r[0]) for r in t) |
|
154 | return (int(r[0]) for r in t) | |
158 |
|
155 | |||
159 | def tiprev(self): |
|
156 | def tiprev(self): | |
160 | t = self._db.execute( |
|
157 | t = self._db.execute( | |
161 | 'SELECT rev FROM changelog ' 'ORDER BY REV DESC ' 'LIMIT 1' |
|
158 | 'SELECT rev FROM changelog ' 'ORDER BY REV DESC ' 'LIMIT 1' | |
162 | ) |
|
159 | ) | |
163 | return next(t) |
|
160 | return next(t) | |
164 |
|
161 | |||
165 | def _partialmatch(self, id): |
|
162 | def _partialmatch(self, id): | |
166 | if wdirhex.startswith(id): |
|
163 | if sha1nodeconstants.wdirhex.startswith(id): | |
167 | raise error.WdirUnsupported |
|
164 | raise error.WdirUnsupported | |
168 | candidates = [ |
|
165 | candidates = [ | |
169 | bin(x[0]) |
|
166 | bin(x[0]) | |
170 | for x in self._db.execute( |
|
167 | for x in self._db.execute( | |
171 | 'SELECT node FROM changelog WHERE node LIKE ?', (id + b'%',) |
|
168 | 'SELECT node FROM changelog WHERE node LIKE ?', (id + b'%',) | |
172 | ) |
|
169 | ) | |
173 | ] |
|
170 | ] | |
174 | if nullhex.startswith(id): |
|
171 | if sha1nodeconstants.nullhex.startswith(id): | |
175 | candidates.append(nullid) |
|
172 | candidates.append(sha1nodeconstants.nullid) | |
176 | if len(candidates) > 1: |
|
173 | if len(candidates) > 1: | |
177 | raise error.AmbiguousPrefixLookupError( |
|
174 | raise error.AmbiguousPrefixLookupError( | |
178 | id, b'00changelog.i', _(b'ambiguous identifier') |
|
175 | id, b'00changelog.i', _(b'ambiguous identifier') | |
179 | ) |
|
176 | ) | |
180 | if candidates: |
|
177 | if candidates: | |
181 | return candidates[0] |
|
178 | return candidates[0] | |
182 | return None |
|
179 | return None | |
183 |
|
180 | |||
184 | def flags(self, rev): |
|
181 | def flags(self, rev): | |
185 | return 0 |
|
182 | return 0 | |
186 |
|
183 | |||
187 | def shortest(self, node, minlength=1): |
|
184 | def shortest(self, node, minlength=1): | |
188 | nodehex = hex(node) |
|
185 | nodehex = hex(node) | |
189 | for attempt in pycompat.xrange(minlength, len(nodehex) + 1): |
|
186 | for attempt in pycompat.xrange(minlength, len(nodehex) + 1): | |
190 | candidate = nodehex[:attempt] |
|
187 | candidate = nodehex[:attempt] | |
191 | matches = int( |
|
188 | matches = int( | |
192 | self._db.execute( |
|
189 | self._db.execute( | |
193 | 'SELECT COUNT(*) FROM changelog WHERE node LIKE ?', |
|
190 | 'SELECT COUNT(*) FROM changelog WHERE node LIKE ?', | |
194 | (pycompat.sysstr(candidate + b'%'),), |
|
191 | (pycompat.sysstr(candidate + b'%'),), | |
195 | ).fetchone()[0] |
|
192 | ).fetchone()[0] | |
196 | ) |
|
193 | ) | |
197 | if matches == 1: |
|
194 | if matches == 1: | |
198 | return candidate |
|
195 | return candidate | |
199 | return nodehex |
|
196 | return nodehex | |
200 |
|
197 | |||
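shortest() above finds the shortest unambiguous node prefix by growing
the candidate one hex digit at a time and counting LIKE matches. The
same loop, runnable standalone against an in-memory SQLite database
(the schema and the two node values are made up)::

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute('CREATE TABLE changelog (node TEXT)')
    db.executemany(
        'INSERT INTO changelog VALUES (?)',
        [('deadbeef' * 5,), ('deadc0de' * 5,)],
    )

    def shortest(nodehex, minlength=1):
        for attempt in range(minlength, len(nodehex) + 1):
            candidate = nodehex[:attempt]
            matches = db.execute(
                'SELECT COUNT(*) FROM changelog WHERE node LIKE ?',
                (candidate + '%',),
            ).fetchone()[0]
            if matches == 1:  # unique prefix found
                return candidate
        return nodehex

    print(shortest('deadbeef' * 5))  # -> 'deadb'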
201 | def headrevs(self, revs=None): |
|
198 | def headrevs(self, revs=None): | |
202 | realheads = [ |
|
199 | realheads = [ | |
203 | int(x[0]) |
|
200 | int(x[0]) | |
204 | for x in self._db.execute( |
|
201 | for x in self._db.execute( | |
205 | 'SELECT rev FROM changelog ' |
|
202 | 'SELECT rev FROM changelog ' | |
206 | 'INNER JOIN heads ON changelog.node = heads.node' |
|
203 | 'INNER JOIN heads ON changelog.node = heads.node' | |
207 | ) |
|
204 | ) | |
208 | ] |
|
205 | ] | |
209 | if revs: |
|
206 | if revs: | |
210 | return sorted([r for r in revs if r in realheads]) |
|
207 | return sorted([r for r in revs if r in realheads]) | |
211 | return sorted(realheads) |
|
208 | return sorted(realheads) | |
212 |
|
209 | |||
213 | def changelogrevision(self, nodeorrev): |
|
210 | def changelogrevision(self, nodeorrev): | |
214 | # Ensure we have a node id |
|
211 | # Ensure we have a node id | |
215 | if isinstance(nodeorrev, int): |
|
212 | if isinstance(nodeorrev, int): | |
216 | n = self.node(nodeorrev) |
|
213 | n = self.node(nodeorrev) | |
217 | else: |
|
214 | else: | |
218 | n = nodeorrev |
|
215 | n = nodeorrev | |
219 | # handle looking up nullid |
|
216 | # handle looking up nullid | |
220 | if n == nullid: |
|
217 | if n == sha1nodeconstants.nullid: | |
221 | return hgchangelog._changelogrevision(extra={}, manifest=nullid) |
|
218 | return hgchangelog._changelogrevision( | |
|
219 | extra={}, manifest=sha1nodeconstants.nullid | |||
|
220 | ) | |||
222 | hn = gitutil.togitnode(n) |
|
221 | hn = gitutil.togitnode(n) | |
223 | # We've got a real commit! |
|
222 | # We've got a real commit! | |
224 | files = [ |
|
223 | files = [ | |
225 | r[0] |
|
224 | r[0] | |
226 | for r in self._db.execute( |
|
225 | for r in self._db.execute( | |
227 | 'SELECT filename FROM changedfiles ' |
|
226 | 'SELECT filename FROM changedfiles ' | |
228 | 'WHERE node = ? and filenode != ?', |
|
227 | 'WHERE node = ? and filenode != ?', | |
229 | (hn, gitutil.nullgit), |
|
228 | (hn, gitutil.nullgit), | |
230 | ) |
|
229 | ) | |
231 | ] |
|
230 | ] | |
232 | filesremoved = [ |
|
231 | filesremoved = [ | |
233 | r[0] |
|
232 | r[0] | |
234 | for r in self._db.execute( |
|
233 | for r in self._db.execute( | |
235 | 'SELECT filename FROM changedfiles ' |
|
234 | 'SELECT filename FROM changedfiles ' | |
236 | 'WHERE node = ? and filenode = ?', |
|
235 | 'WHERE node = ? and filenode = ?', | |
237 | (hn, nullhex), |
|
236 | (hn, sha1nodeconstants.nullhex), | |
238 | ) |
|
237 | ) | |
239 | ] |
|
238 | ] | |
240 | c = self.gitrepo[hn] |
|
239 | c = self.gitrepo[hn] | |
241 | return hgchangelog._changelogrevision( |
|
240 | return hgchangelog._changelogrevision( | |
242 | manifest=n, # pretend manifest the same as the commit node |
|
241 | manifest=n, # pretend manifest the same as the commit node | |
243 | user=b'%s <%s>' |
|
242 | user=b'%s <%s>' | |
244 | % (c.author.name.encode('utf8'), c.author.email.encode('utf8')), |
|
243 | % (c.author.name.encode('utf8'), c.author.email.encode('utf8')), | |
245 | date=(c.author.time, -c.author.offset * 60), |
|
244 | date=(c.author.time, -c.author.offset * 60), | |
246 | files=files, |
|
245 | files=files, | |
247 | # TODO filesadded in the index |
|
246 | # TODO filesadded in the index | |
248 | filesremoved=filesremoved, |
|
247 | filesremoved=filesremoved, | |
249 | description=c.message.encode('utf8'), |
|
248 | description=c.message.encode('utf8'), | |
250 | # TODO do we want to handle extra? how? |
|
249 | # TODO do we want to handle extra? how? | |
251 | extra={b'branch': b'default'}, |
|
250 | extra={b'branch': b'default'}, | |
252 | ) |
|
251 | ) | |
253 |
|
252 | |||
254 | def ancestors(self, revs, stoprev=0, inclusive=False): |
|
253 | def ancestors(self, revs, stoprev=0, inclusive=False): | |
255 | revs = list(revs) |
|
254 | revs = list(revs) | |
256 | tip = self.rev(self.tip()) |
|
255 | tip = self.rev(self.tip()) | |
257 | for r in revs: |
|
256 | for r in revs: | |
258 | if r > tip: |
|
257 | if r > tip: | |
259 | raise IndexError(b'Invalid rev %r' % r) |
|
258 | raise IndexError(b'Invalid rev %r' % r) | |
260 | return ancestor.lazyancestors( |
|
259 | return ancestor.lazyancestors( | |
261 | self.parentrevs, revs, stoprev=stoprev, inclusive=inclusive |
|
260 | self.parentrevs, revs, stoprev=stoprev, inclusive=inclusive | |
262 | ) |
|
261 | ) | |
263 |
|
262 | |||
264 | # Cleanup opportunity: this is *identical* to the revlog.py version |
|
263 | # Cleanup opportunity: this is *identical* to the revlog.py version | |
265 | def descendants(self, revs): |
|
264 | def descendants(self, revs): | |
266 | return dagop.descendantrevs(revs, self.revs, self.parentrevs) |
|
265 | return dagop.descendantrevs(revs, self.revs, self.parentrevs) | |
267 |
|
266 | |||
268 | def incrementalmissingrevs(self, common=None): |
|
267 | def incrementalmissingrevs(self, common=None): | |
269 | """Return an object that can be used to incrementally compute the |
|
268 | """Return an object that can be used to incrementally compute the | |
270 | revision numbers of the ancestors of arbitrary sets that are not |
|
269 | revision numbers of the ancestors of arbitrary sets that are not | |
271 | ancestors of common. This is an ancestor.incrementalmissingancestors |
|
270 | ancestors of common. This is an ancestor.incrementalmissingancestors | |
272 | object. |
|
271 | object. | |
273 |
|
272 | |||
274 | 'common' is a list of revision numbers. If common is not supplied, uses |
|
273 | 'common' is a list of revision numbers. If common is not supplied, uses | |
275 | nullrev. |
|
274 | nullrev. | |
276 | """ |
|
275 | """ | |
277 | if common is None: |
|
276 | if common is None: | |
278 | common = [nullrev] |
|
277 | common = [nullrev] | |
279 |
|
278 | |||
280 | return ancestor.incrementalmissingancestors(self.parentrevs, common) |
|
279 | return ancestor.incrementalmissingancestors(self.parentrevs, common) | |
281 |
|
280 | |||
282 | def findmissing(self, common=None, heads=None): |
|
281 | def findmissing(self, common=None, heads=None): | |
283 | """Return the ancestors of heads that are not ancestors of common. |
|
282 | """Return the ancestors of heads that are not ancestors of common. | |
284 |
|
283 | |||
285 | More specifically, return a list of nodes N such that every N |
|
284 | More specifically, return a list of nodes N such that every N | |
286 | satisfies the following constraints: |
|
285 | satisfies the following constraints: | |
287 |
|
286 | |||
288 | 1. N is an ancestor of some node in 'heads' |
|
287 | 1. N is an ancestor of some node in 'heads' | |
289 | 2. N is not an ancestor of any node in 'common' |
|
288 | 2. N is not an ancestor of any node in 'common' | |
290 |
|
289 | |||
291 | The list is sorted by revision number, meaning it is |
|
290 | The list is sorted by revision number, meaning it is | |
292 | topologically sorted. |
|
291 | topologically sorted. | |
293 |
|
292 | |||
294 | 'heads' and 'common' are both lists of node IDs. If heads is |
|
293 | 'heads' and 'common' are both lists of node IDs. If heads is | |
295 | not supplied, uses all of the revlog's heads. If common is not |
|
294 | not supplied, uses all of the revlog's heads. If common is not | |
296 | supplied, uses nullid.""" |
|
295 | supplied, uses nullid.""" | |
297 | if common is None: |
|
296 | if common is None: | |
298 | common = [nullid] |
|
297 | common = [sha1nodeconstants.nullid] | |
299 | if heads is None: |
|
298 | if heads is None: | |
300 | heads = self.heads() |
|
299 | heads = self.heads() | |
301 |
|
300 | |||
302 | common = [self.rev(n) for n in common] |
|
301 | common = [self.rev(n) for n in common] | |
303 | heads = [self.rev(n) for n in heads] |
|
302 | heads = [self.rev(n) for n in heads] | |
304 |
|
303 | |||
305 | inc = self.incrementalmissingrevs(common=common) |
|
304 | inc = self.incrementalmissingrevs(common=common) | |
306 | return [self.node(r) for r in inc.missingancestors(heads)] |
|
305 | return [self.node(r) for r in inc.missingancestors(heads)] | |
307 |
|
306 | |||
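The findmissing() docstring above pins down the contract: every
returned node is an ancestor of some head, is not an ancestor of
anything in common, and the result is sorted by revision. A toy
pure-Python rendering of that contract over a hypothetical
five-revision DAG (rev 4 merges 2 and 3)::

    PARENTS = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (2, 3)}

    def ancestors(revs):
        # Inclusive ancestor set, mirroring missingancestors() semantics.
        seen, stack = set(), list(revs)
        while stack:
            r = stack.pop()
            if r == -1 or r in seen:
                continue
            seen.add(r)
            stack.extend(PARENTS[r])
        return seen

    def findmissing(common, heads):
        return sorted(ancestors(heads) - ancestors(common))

    print(findmissing(common=[2], heads=[4]))  # -> [3, 4]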
308 | def children(self, node): |
|
307 | def children(self, node): | |
309 | """find the children of a given node""" |
|
308 | """find the children of a given node""" | |
310 | c = [] |
|
309 | c = [] | |
311 | p = self.rev(node) |
|
310 | p = self.rev(node) | |
312 | for r in self.revs(start=p + 1): |
|
311 | for r in self.revs(start=p + 1): | |
313 | prevs = [pr for pr in self.parentrevs(r) if pr != nullrev] |
|
312 | prevs = [pr for pr in self.parentrevs(r) if pr != nullrev] | |
314 | if prevs: |
|
313 | if prevs: | |
315 | for pr in prevs: |
|
314 | for pr in prevs: | |
316 | if pr == p: |
|
315 | if pr == p: | |
317 | c.append(self.node(r)) |
|
316 | c.append(self.node(r)) | |
318 | elif p == nullrev: |
|
317 | elif p == nullrev: | |
319 | c.append(self.node(r)) |
|
318 | c.append(self.node(r)) | |
320 | return c |
|
319 | return c | |
321 |
|
320 | |||
322 | def reachableroots(self, minroot, heads, roots, includepath=False): |
|
321 | def reachableroots(self, minroot, heads, roots, includepath=False): | |
323 | return dagop._reachablerootspure( |
|
322 | return dagop._reachablerootspure( | |
324 | self.parentrevs, minroot, roots, heads, includepath |
|
323 | self.parentrevs, minroot, roots, heads, includepath | |
325 | ) |
|
324 | ) | |
326 |
|
325 | |||
327 | # Cleanup opportunity: this is *identical* to the revlog.py version |
|
326 | # Cleanup opportunity: this is *identical* to the revlog.py version | |
328 | def isancestor(self, a, b): |
|
327 | def isancestor(self, a, b): | |
329 | a, b = self.rev(a), self.rev(b) |
|
328 | a, b = self.rev(a), self.rev(b) | |
330 | return self.isancestorrev(a, b) |
|
329 | return self.isancestorrev(a, b) | |
331 |
|
330 | |||
332 | # Cleanup opportunity: this is *identical* to the revlog.py version |
|
331 | # Cleanup opportunity: this is *identical* to the revlog.py version | |
333 | def isancestorrev(self, a, b): |
|
332 | def isancestorrev(self, a, b): | |
334 | if a == nullrev: |
|
333 | if a == nullrev: | |
335 | return True |
|
334 | return True | |
336 | elif a == b: |
|
335 | elif a == b: | |
337 | return True |
|
336 | return True | |
338 | elif a > b: |
|
337 | elif a > b: | |
339 | return False |
|
338 | return False | |
340 | return bool(self.reachableroots(a, [b], [a], includepath=False)) |
|
339 | return bool(self.reachableroots(a, [b], [a], includepath=False)) | |
341 |
|
340 | |||
342 | def parentrevs(self, rev): |
|
341 | def parentrevs(self, rev): | |
343 | n = self.node(rev) |
|
342 | n = self.node(rev) | |
344 | hn = gitutil.togitnode(n) |
|
343 | hn = gitutil.togitnode(n) | |
345 | if hn != gitutil.nullgit: |
|
344 | if hn != gitutil.nullgit: | |
346 | c = self.gitrepo[hn] |
|
345 | c = self.gitrepo[hn] | |
347 | else: |
|
346 | else: | |
348 | return nullrev, nullrev |
|
347 | return nullrev, nullrev | |
349 | p1 = p2 = nullrev |
|
348 | p1 = p2 = nullrev | |
350 | if c.parents: |
|
349 | if c.parents: | |
351 | p1 = self.rev(c.parents[0].id.raw) |
|
350 | p1 = self.rev(c.parents[0].id.raw) | |
352 | if len(c.parents) > 2: |
|
351 | if len(c.parents) > 2: | |
353 | raise error.Abort(b'TODO octopus merge handling') |
|
352 | raise error.Abort(b'TODO octopus merge handling') | |
354 | if len(c.parents) == 2: |
|
353 | if len(c.parents) == 2: | |
355 | p2 = self.rev(c.parents[1].id.raw) |
|
354 | p2 = self.rev(c.parents[1].id.raw) | |
356 | return p1, p2 |
|
355 | return p1, p2 | |
357 |
|
356 | |||
358 | # Private method is used at least by the tags code. |
|
357 | # Private method is used at least by the tags code. | |
359 | _uncheckedparentrevs = parentrevs |
|
358 | _uncheckedparentrevs = parentrevs | |
360 |
|
359 | |||
361 | def commonancestorsheads(self, a, b): |
|
360 | def commonancestorsheads(self, a, b): | |
362 | # TODO the revlog version of this has a C path, so we probably |

361 | # TODO the revlog version of this has a C path, so we probably | 
363 | # need to optimize this... |
|
362 | # need to optimize this... | |
364 | a, b = self.rev(a), self.rev(b) |
|
363 | a, b = self.rev(a), self.rev(b) | |
365 | return [ |
|
364 | return [ | |
366 | self.node(n) |
|
365 | self.node(n) | |
367 | for n in ancestor.commonancestorsheads(self.parentrevs, a, b) |
|
366 | for n in ancestor.commonancestorsheads(self.parentrevs, a, b) | |
368 | ] |
|
367 | ] | |
369 |
|
368 | |||
370 | def branchinfo(self, rev): |
|
369 | def branchinfo(self, rev): | |
371 | """Git doesn't do named branches, so just put everything on default.""" |
|
370 | """Git doesn't do named branches, so just put everything on default.""" | |
372 | return b'default', False |
|
371 | return b'default', False | |
373 |
|
372 | |||
374 | def delayupdate(self, tr): |
|
373 | def delayupdate(self, tr): | |
375 | # TODO: I think we can elide this because we're just dropping |
|
374 | # TODO: I think we can elide this because we're just dropping | |
376 | # an object in the git repo? |
|
375 | # an object in the git repo? | |
377 | pass |
|
376 | pass | |
378 |
|
377 | |||
379 | def add( |
|
378 | def add( | |
380 | self, |
|
379 | self, | |
381 | manifest, |
|
380 | manifest, | |
382 | files, |
|
381 | files, | |
383 | desc, |
|
382 | desc, | |
384 | transaction, |
|
383 | transaction, | |
385 | p1, |
|
384 | p1, | |
386 | p2, |
|
385 | p2, | |
387 | user, |
|
386 | user, | |
388 | date=None, |
|
387 | date=None, | |
389 | extra=None, |
|
388 | extra=None, | |
390 | p1copies=None, |
|
389 | p1copies=None, | |
391 | p2copies=None, |
|
390 | p2copies=None, | |
392 | filesadded=None, |
|
391 | filesadded=None, | |
393 | filesremoved=None, |
|
392 | filesremoved=None, | |
394 | ): |
|
393 | ): | |
395 | parents = [] |
|
394 | parents = [] | |
396 | hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2) |
|
395 | hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2) | |
397 | if p1 != nullid: |
|
396 | if p1 != sha1nodeconstants.nullid: | |
398 | parents.append(hp1) |
|
397 | parents.append(hp1) | |
399 | if p2 and p2 != nullid: |
|
398 | if p2 and p2 != sha1nodeconstants.nullid: | |
400 | parents.append(hp2) |
|
399 | parents.append(hp2) | |
401 | assert date is not None |
|
400 | assert date is not None | |
402 | timestamp, tz = date |
|
401 | timestamp, tz = date | |
403 | sig = pygit2.Signature( |
|
402 | sig = pygit2.Signature( | |
404 | encoding.unifromlocal(stringutil.person(user)), |
|
403 | encoding.unifromlocal(stringutil.person(user)), | |
405 | encoding.unifromlocal(stringutil.email(user)), |
|
404 | encoding.unifromlocal(stringutil.email(user)), | |
406 | int(timestamp), |
|
405 | int(timestamp), | |
407 | -int(tz // 60), |
|
406 | -int(tz // 60), | |
408 | ) |
|
407 | ) | |
409 | oid = self.gitrepo.create_commit( |
|
408 | oid = self.gitrepo.create_commit( | |
410 | None, sig, sig, desc, gitutil.togitnode(manifest), parents |
|
409 | None, sig, sig, desc, gitutil.togitnode(manifest), parents | |
411 | ) |
|
410 | ) | |
412 | # Set up an internal reference to force the commit into the |
|
411 | # Set up an internal reference to force the commit into the | |
413 | # changelog. Hypothetically, we could even use this refs/hg/ |
|
412 | # changelog. Hypothetically, we could even use this refs/hg/ | |
414 | # namespace to allow for anonymous heads on git repos, which |
|
413 | # namespace to allow for anonymous heads on git repos, which | |
415 | # would be neat. |
|
414 | # would be neat. | |
416 | self.gitrepo.references.create( |
|
415 | self.gitrepo.references.create( | |
417 | 'refs/hg/internal/latest-commit', oid, force=True |
|
416 | 'refs/hg/internal/latest-commit', oid, force=True | |
418 | ) |
|
417 | ) | |
419 | # Reindex now to pick up changes. We omit the progress |
|
418 | # Reindex now to pick up changes. We omit the progress | |
420 | # and log callbacks because this will be very quick. |
|
419 | # and log callbacks because this will be very quick. | |
421 | index._index_repo(self.gitrepo, self._db) |
|
420 | index._index_repo(self.gitrepo, self._db) | |
422 | return oid.raw |
|
421 | return oid.raw | |
423 |
|
422 | |||
424 |
|
423 | |||
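add() above talks to pygit2 directly: build a Signature, create the
commit with no target reference, then pin the result under refs/hg/ so
that git's garbage collection cannot reap an otherwise unreachable
commit. A rough standalone equivalent (repository path, file name, and
author are illustrative)::

    import pygit2

    repo = pygit2.init_repository('/tmp/demo-repo')
    blob = repo.create_blob(b'hello\n')
    tb = repo.TreeBuilder()
    tb.insert('hello.txt', blob, pygit2.GIT_FILEMODE_BLOB)
    sig = pygit2.Signature('An Author', 'author@example.com')
    # A None target means no ref is updated, so the new commit starts
    # out unreachable, exactly as in changelog.add().
    oid = repo.create_commit(None, sig, sig, 'initial', tb.write(), [])
    repo.references.create('refs/hg/internal/latest-commit', oid, force=True)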
425 | class manifestlog(baselog): |
|
424 | class manifestlog(baselog): | |
426 | nodeconstants = sha1nodeconstants |
|
425 | nodeconstants = sha1nodeconstants | |
427 |
|
426 | |||
428 | def __getitem__(self, node): |
|
427 | def __getitem__(self, node): | |
429 | return self.get(b'', node) |
|
428 | return self.get(b'', node) | |
430 |
|
429 | |||
431 | def get(self, relpath, node): |
|
430 | def get(self, relpath, node): | |
432 | if node == nullid: |
|
431 | if node == sha1nodeconstants.nullid: | |
433 | # TODO: this should almost certainly be a memgittreemanifestctx |
|
432 | # TODO: this should almost certainly be a memgittreemanifestctx | |
434 | return manifest.memtreemanifestctx(self, relpath) |
|
433 | return manifest.memtreemanifestctx(self, relpath) | |
435 | commit = self.gitrepo[gitutil.togitnode(node)] |
|
434 | commit = self.gitrepo[gitutil.togitnode(node)] | |
436 | t = commit.tree |
|
435 | t = commit.tree | |
437 | if relpath: |
|
436 | if relpath: | |
438 | parts = relpath.split(b'/') |
|
437 | parts = relpath.split(b'/') | |
439 | for p in parts: |
|
438 | for p in parts: | |
440 | te = t[p] |
|
439 | te = t[p] | |
441 | t = self.gitrepo[te.id] |
|
440 | t = self.gitrepo[te.id] | |
442 | return gitmanifest.gittreemanifestctx(self.gitrepo, t) |
|
441 | return gitmanifest.gittreemanifestctx(self.gitrepo, t) | |
443 |
|
442 | |||
444 |
|
443 | |||
445 | @interfaceutil.implementer(repository.ifilestorage) |
|
444 | @interfaceutil.implementer(repository.ifilestorage) | |
446 | class filelog(baselog): |
|
445 | class filelog(baselog): | |
447 | def __init__(self, gr, db, path): |
|
446 | def __init__(self, gr, db, path): | |
448 | super(filelog, self).__init__(gr, db) |
|
447 | super(filelog, self).__init__(gr, db) | |
449 | assert isinstance(path, bytes) |
|
448 | assert isinstance(path, bytes) | |
450 | self.path = path |
|
449 | self.path = path | |
|
450 | self.nullid = sha1nodeconstants.nullid | |||
451 |
|
451 | |||
452 | def read(self, node): |
|
452 | def read(self, node): | |
453 | if node == nullid: |
|
453 | if node == sha1nodeconstants.nullid: | |
454 | return b'' |
|
454 | return b'' | |
455 | return self.gitrepo[gitutil.togitnode(node)].data |
|
455 | return self.gitrepo[gitutil.togitnode(node)].data | |
456 |
|
456 | |||
457 | def lookup(self, node): |
|
457 | def lookup(self, node): | |
458 | if len(node) not in (20, 40): |
|
458 | if len(node) not in (20, 40): | |
459 | node = int(node) |
|
459 | node = int(node) | |
460 | if isinstance(node, int): |
|
460 | if isinstance(node, int): | |
461 | assert False, b'todo revnums for nodes' |
|
461 | assert False, b'todo revnums for nodes' | |
462 | if len(node) == 40: |
|
462 | if len(node) == 40: | |
463 | node = bin(node) |
|
463 | node = bin(node) | |
464 | hnode = gitutil.togitnode(node) |
|
464 | hnode = gitutil.togitnode(node) | |
465 | if hnode in self.gitrepo: |
|
465 | if hnode in self.gitrepo: | |
466 | return node |
|
466 | return node | |
467 | raise error.LookupError(self.path, node, _(b'no match found')) |
|
467 | raise error.LookupError(self.path, node, _(b'no match found')) | |
468 |
|
468 | |||
469 | def cmp(self, node, text): |
|
469 | def cmp(self, node, text): | |
470 | """Returns True if text is different than content at `node`.""" |
|
470 | """Returns True if text is different than content at `node`.""" | |
471 | return self.read(node) != text |
|
471 | return self.read(node) != text | |
472 |
|
472 | |||
473 | def add(self, text, meta, transaction, link, p1=None, p2=None): |
|
473 | def add(self, text, meta, transaction, link, p1=None, p2=None): | |
474 | assert not meta # Should we even try to handle this? |
|
474 | assert not meta # Should we even try to handle this? | |
475 | return self.gitrepo.create_blob(text).raw |
|
475 | return self.gitrepo.create_blob(text).raw | |
476 |
|
476 | |||
477 | def __iter__(self): |
|
477 | def __iter__(self): | |
478 | for clrev in self._db.execute( |
|
478 | for clrev in self._db.execute( | |
479 | ''' |
|
479 | ''' | |
480 | SELECT rev FROM changelog |
|
480 | SELECT rev FROM changelog | |
481 | INNER JOIN changedfiles ON changelog.node = changedfiles.node |
|
481 | INNER JOIN changedfiles ON changelog.node = changedfiles.node | |
482 | WHERE changedfiles.filename = ? AND changedfiles.filenode != ? |
|
482 | WHERE changedfiles.filename = ? AND changedfiles.filenode != ? | |
483 | ''', |
|
483 | ''', | |
484 | (pycompat.fsdecode(self.path), gitutil.nullgit), |
|
484 | (pycompat.fsdecode(self.path), gitutil.nullgit), | |
485 | ): |
|
485 | ): | |
486 | yield clrev[0] |
|
486 | yield clrev[0] | |
487 |
|
487 | |||
488 | def linkrev(self, fr): |
|
488 | def linkrev(self, fr): | |
489 | return fr |
|
489 | return fr | |
490 |
|
490 | |||
491 | def rev(self, node): |
|
491 | def rev(self, node): | |
492 | row = self._db.execute( |
|
492 | row = self._db.execute( | |
493 | ''' |
|
493 | ''' | |
494 | SELECT rev FROM changelog |
|
494 | SELECT rev FROM changelog | |
495 | INNER JOIN changedfiles ON changelog.node = changedfiles.node |
|
495 | INNER JOIN changedfiles ON changelog.node = changedfiles.node | |
496 | WHERE changedfiles.filename = ? AND changedfiles.filenode = ?''', |
|
496 | WHERE changedfiles.filename = ? AND changedfiles.filenode = ?''', | |
497 | (pycompat.fsdecode(self.path), gitutil.togitnode(node)), |
|
497 | (pycompat.fsdecode(self.path), gitutil.togitnode(node)), | |
498 | ).fetchone() |
|
498 | ).fetchone() | |
499 | if row is None: |
|
499 | if row is None: | |
500 | raise error.LookupError(self.path, node, _(b'no such node')) |
|
500 | raise error.LookupError(self.path, node, _(b'no such node')) | |
501 | return int(row[0]) |
|
501 | return int(row[0]) | |
502 |
|
502 | |||
503 | def node(self, rev): |
|
503 | def node(self, rev): | |
504 | maybe = self._db.execute( |
|
504 | maybe = self._db.execute( | |
505 | '''SELECT filenode FROM changedfiles |
|
505 | '''SELECT filenode FROM changedfiles | |
506 | INNER JOIN changelog ON changelog.node = changedfiles.node |
|
506 | INNER JOIN changelog ON changelog.node = changedfiles.node | |
507 | WHERE changelog.rev = ? AND filename = ? |
|
507 | WHERE changelog.rev = ? AND filename = ? | |
508 | ''', |
|
508 | ''', | |
509 | (rev, pycompat.fsdecode(self.path)), |
|
509 | (rev, pycompat.fsdecode(self.path)), | |
510 | ).fetchone() |
|
510 | ).fetchone() | |
511 | if maybe is None: |
|
511 | if maybe is None: | |
512 | raise IndexError('gitlog %r out of range %d' % (self.path, rev)) |
|
512 | raise IndexError('gitlog %r out of range %d' % (self.path, rev)) | |
513 | return bin(maybe[0]) |
|
513 | return bin(maybe[0]) | |
514 |
|
514 | |||
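The rev()/node() pair above is a straight round trip through the SQLite cache: filenode to changelog revision via a join on the commit node, and back again. A self-contained sketch against a toy version of the two tables (simplified schema, illustrative data):

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.executescript('''
        CREATE TABLE changelog (rev INTEGER PRIMARY KEY, node TEXT NOT NULL);
        CREATE TABLE changedfiles (node TEXT, filename TEXT, filenode TEXT);
    ''')
    commit, blob = 'aa' * 20, 'bb' * 20  # hex commit and blob ids
    db.execute('INSERT INTO changelog VALUES (?, ?)', (0, commit))
    db.execute('INSERT INTO changedfiles VALUES (?, ?, ?)', (commit, 'f.txt', blob))

    # rev(): filenode -> changelog revision
    rev = db.execute(
        'SELECT rev FROM changelog '
        'INNER JOIN changedfiles ON changelog.node = changedfiles.node '
        'WHERE changedfiles.filename = ? AND changedfiles.filenode = ?',
        ('f.txt', blob),
    ).fetchone()[0]

    # node(): revision -> filenode, the inverse join
    fnode = db.execute(
        'SELECT filenode FROM changedfiles '
        'INNER JOIN changelog ON changelog.node = changedfiles.node '
        'WHERE changelog.rev = ? AND filename = ?',
        (rev, 'f.txt'),
    ).fetchone()[0]
    assert (rev, fnode) == (0, blob)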
515 | def parents(self, node): |
|
515 | def parents(self, node): | |
516 | gn = gitutil.togitnode(node) |
|
516 | gn = gitutil.togitnode(node) | |
517 | gp = pycompat.fsdecode(self.path) |
|
517 | gp = pycompat.fsdecode(self.path) | |
518 | ps = [] |
|
518 | ps = [] | |
519 | for p in self._db.execute( |
|
519 | for p in self._db.execute( | |
520 | '''SELECT p1filenode, p2filenode FROM changedfiles |
|
520 | '''SELECT p1filenode, p2filenode FROM changedfiles | |
521 | WHERE filenode = ? AND filename = ? |
|
521 | WHERE filenode = ? AND filename = ? | |
522 | ''', |
|
522 | ''', | |
523 | (gn, gp), |
|
523 | (gn, gp), | |
524 | ).fetchone(): |
|
524 | ).fetchone(): | |
525 | if p is None: |
|
525 | if p is None: | |
526 | commit = self._db.execute( |
|
526 | commit = self._db.execute( | |
527 | "SELECT node FROM changedfiles " |
|
527 | "SELECT node FROM changedfiles " | |
528 | "WHERE filenode = ? AND filename = ?", |
|
528 | "WHERE filenode = ? AND filename = ?", | |
529 | (gn, gp), |
|
529 | (gn, gp), | |
530 | ).fetchone()[0] |
|
530 | ).fetchone()[0] | |
531 | # This filelog is missing some data. Build the |
|
531 | # This filelog is missing some data. Build the | |
532 | # filelog, then recurse (which will always find data). |
|
532 | # filelog, then recurse (which will always find data). | |
533 | if pycompat.ispy3: |
|
533 | if pycompat.ispy3: | |
534 | commit = commit.decode('ascii') |
|
534 | commit = commit.decode('ascii') | |
535 | index.fill_in_filelog(self.gitrepo, self._db, commit, gp, gn) |
|
535 | index.fill_in_filelog(self.gitrepo, self._db, commit, gp, gn) | |
536 | return self.parents(node) |
|
536 | return self.parents(node) | |
537 | else: |
|
537 | else: | |
538 | ps.append(bin(p)) |
|
538 | ps.append(bin(p)) | |
539 | return ps |
|
539 | return ps | |
540 |
|
540 | |||
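parents() above uses a fill-on-demand pattern: when a row is missing its parent columns, it asks the indexer to backfill the filelog once and then recurses, which is guaranteed to find the data on the second pass. The shape of that pattern in isolation, with a plain dict standing in for the database (all names illustrative):

    cache = {'n1': None}  # None marks "parents not yet computed"

    def backfill(node):
        cache[node] = ['n0']  # pretend we walked history once

    def parents(node):
        val = cache[node]
        if val is None:
            backfill(node)        # expensive, happens at most once per node
            return parents(node)  # recursion now finds the data
        return val

    assert parents('n1') == ['n0']
    assert parents('n1') == ['n0']  # second call hits the cache directly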
541 | def renamed(self, node): |
|
541 | def renamed(self, node): | |
542 | # TODO: renames/copies |
|
542 | # TODO: renames/copies | |
543 | return False |
|
543 | return False |
@@ -1,53 +1,53 b'' | |||||
1 | """utilities to assist in working with pygit2""" |
|
1 | """utilities to assist in working with pygit2""" | |
2 | from __future__ import absolute_import |
|
2 | from __future__ import absolute_import | |
3 |
|
3 | |||
4 | from mercurial.node import bin, hex, nullid |
|
4 | from mercurial.node import bin, hex, sha1nodeconstants |
5 |
|
5 | |||
6 | from mercurial import pycompat |
|
6 | from mercurial import pycompat | |
7 |
|
7 | |||
8 | pygit2_module = None |
|
8 | pygit2_module = None | |
9 |
|
9 | |||
10 |
|
10 | |||
11 | def get_pygit2(): |
|
11 | def get_pygit2(): | |
12 | global pygit2_module |
|
12 | global pygit2_module | |
13 | if pygit2_module is None: |
|
13 | if pygit2_module is None: | |
14 | try: |
|
14 | try: | |
15 | import pygit2 as pygit2_module |
|
15 | import pygit2 as pygit2_module | |
16 |
|
16 | |||
17 | pygit2_module.InvalidSpecError |
|
17 | pygit2_module.InvalidSpecError | |
18 | except (ImportError, AttributeError): |
|
18 | except (ImportError, AttributeError): | |
19 | pass |
|
19 | pass | |
20 | return pygit2_module |
|
20 | return pygit2_module | |
21 |
|
21 | |||
22 |
|
22 | |||
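get_pygit2() combines a cached lazy import with a feature probe: touching InvalidSpecError raises AttributeError on versions of pygit2 that are too old, so both a missing and a stale install leave the module unset. The same guarded-import idiom, sketched against a stdlib module (json and JSONDecodeError are stand-ins, not what the extension actually checks):

    _mod = None

    def get_module():
        global _mod
        if _mod is None:
            try:
                import json as candidate

                candidate.JSONDecodeError  # feature probe; AttributeError if too old
                _mod = candidate
            except (ImportError, AttributeError):
                pass  # leave _mod as None; callers must handle that
        return _mod

    assert get_module() is not None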
23 | def pygit2_version(): |
|
23 | def pygit2_version(): | |
24 | mod = get_pygit2() |
|
24 | mod = get_pygit2() | |
25 | v = "N/A" |
|
25 | v = "N/A" | |
26 |
|
26 | |||
27 | if mod: |
|
27 | if mod: | |
28 | try: |
|
28 | try: | |
29 | v = mod.__version__ |
|
29 | v = mod.__version__ | |
30 | except AttributeError: |
|
30 | except AttributeError: | |
31 | pass |
|
31 | pass | |
32 |
|
32 | |||
33 | return b"(pygit2 %s)" % v.encode("utf-8") |
|
33 | return b"(pygit2 %s)" % v.encode("utf-8") | |
34 |
|
34 | |||
35 |
|
35 | |||
36 | def togitnode(n): |
|
36 | def togitnode(n): | |
37 | """Wrapper to convert a Mercurial binary node to a unicode hexlified node. |
|
37 | """Wrapper to convert a Mercurial binary node to a unicode hexlified node. | |
38 |
|
38 | |||
39 | pygit2 and sqlite both need nodes as strings, not bytes. |
|
39 | pygit2 and sqlite both need nodes as strings, not bytes. | |
40 | """ |
|
40 | """ | |
41 | assert len(n) == 20 |
|
41 | assert len(n) == 20 | |
42 | return pycompat.sysstr(hex(n)) |
|
42 | return pycompat.sysstr(hex(n)) | |
43 |
|
43 | |||
44 |
|
44 | |||
45 | def fromgitnode(n): |
|
45 | def fromgitnode(n): | |
46 | """Opposite of togitnode.""" |
|
46 | """Opposite of togitnode.""" | |
47 | assert len(n) == 40 |
|
47 | assert len(n) == 40 | |
48 | if pycompat.ispy3: |
|
48 | if pycompat.ispy3: | |
49 | return bin(n.encode('ascii')) |
|
49 | return bin(n.encode('ascii')) | |
50 | return bin(n) |
|
50 | return bin(n) | |
51 |
|
51 | |||
52 |
|
52 | |||
53 | nullgit = togitnode(nullid) |
|
53 | nullgit = togitnode(sha1nodeconstants.nullid) |
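togitnode()/fromgitnode() are hexlify/unhexlify plus a bytes-versus-str shim, since pygit2 and sqlite want 40-character native strings while Mercurial passes 20-byte binary nodes; nullgit is then just forty zeros. A stdlib-only round trip showing the same conversions (to_git/from_git are illustrative names):

    import binascii

    def to_git(n):
        assert len(n) == 20  # binary Mercurial/git node
        return binascii.hexlify(n).decode('ascii')

    def from_git(s):
        assert len(s) == 40  # hex node as a str
        return binascii.unhexlify(s.encode('ascii'))

    node = bytes(range(20))
    assert from_git(to_git(node)) == node
    assert to_git(b'\x00' * 20) == '0' * 40  # the null sentinel, like nullgit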
@@ -1,362 +1,361 b'' | |||||
1 | from __future__ import absolute_import |
|
1 | from __future__ import absolute_import | |
2 |
|
2 | |||
3 | import collections |
|
3 | import collections | |
4 | import os |
|
4 | import os | |
5 | import sqlite3 |
|
5 | import sqlite3 | |
6 |
|
6 | |||
7 | from mercurial.i18n import _ |
|
7 | from mercurial.i18n import _ | |
8 | from mercurial.node import ( |
|
8 | from mercurial.node import sha1nodeconstants |
9 | nullhex, |
|
|||
10 | nullid, |
|
|||
11 | ) |
|
|||
12 |
|
9 | |||
13 | from mercurial import ( |
|
10 | from mercurial import ( | |
14 | encoding, |
|
11 | encoding, | |
15 | error, |
|
12 | error, | |
16 | pycompat, |
|
13 | pycompat, | |
17 | ) |
|
14 | ) | |
18 |
|
15 | |||
19 | from . import gitutil |
|
16 | from . import gitutil | |
20 |
|
17 | |||
21 |
|
18 | |||
22 | pygit2 = gitutil.get_pygit2() |
|
19 | pygit2 = gitutil.get_pygit2() | |
23 |
|
20 | |||
24 | _CURRENT_SCHEMA_VERSION = 1 |
|
21 | _CURRENT_SCHEMA_VERSION = 1 | |
25 | _SCHEMA = ( |
|
22 | _SCHEMA = ( | |
26 | """ |
|
23 | """ | |
27 | CREATE TABLE refs ( |
|
24 | CREATE TABLE refs ( | |
28 | -- node and name are unique together. There may be more than one name for |
|
25 | -- node and name are unique together. There may be more than one name for | |
29 | -- a given node, and there may be no name at all for a given node (in the |
|
26 | -- a given node, and there may be no name at all for a given node (in the | |
30 | -- case of an anonymous hg head). |
|
27 | -- case of an anonymous hg head). | |
31 | node TEXT NOT NULL, |
|
28 | node TEXT NOT NULL, | |
32 | name TEXT |
|
29 | name TEXT | |
33 | ); |
|
30 | ); | |
34 |
|
31 | |||
35 | -- The "possible heads" of the repository, which we use to figure out |
|
32 | -- The "possible heads" of the repository, which we use to figure out | |
36 | -- if we need to re-walk the changelog. |
|
33 | -- if we need to re-walk the changelog. | |
37 | CREATE TABLE possible_heads ( |
|
34 | CREATE TABLE possible_heads ( | |
38 | node TEXT NOT NULL |
|
35 | node TEXT NOT NULL | |
39 | ); |
|
36 | ); | |
40 |
|
37 | |||
41 | -- The topological heads of the changelog, which hg depends on. |
|
38 | -- The topological heads of the changelog, which hg depends on. | |
42 | CREATE TABLE heads ( |
|
39 | CREATE TABLE heads ( | |
43 | node TEXT NOT NULL |
|
40 | node TEXT NOT NULL | |
44 | ); |
|
41 | ); | |
45 |
|
42 | |||
46 | -- A total ordering of the changelog |
|
43 | -- A total ordering of the changelog | |
47 | CREATE TABLE changelog ( |
|
44 | CREATE TABLE changelog ( | |
48 | rev INTEGER NOT NULL PRIMARY KEY, |
|
45 | rev INTEGER NOT NULL PRIMARY KEY, | |
49 | node TEXT NOT NULL, |
|
46 | node TEXT NOT NULL, | |
50 | p1 TEXT, |
|
47 | p1 TEXT, | |
51 | p2 TEXT |
|
48 | p2 TEXT | |
52 | ); |
|
49 | ); | |
53 |
|
50 | |||
54 | CREATE UNIQUE INDEX changelog_node_idx ON changelog(node); |
|
51 | CREATE UNIQUE INDEX changelog_node_idx ON changelog(node); | |
55 | CREATE UNIQUE INDEX changelog_node_rev_idx ON changelog(rev, node); |
|
52 | CREATE UNIQUE INDEX changelog_node_rev_idx ON changelog(rev, node); | |
56 |
|
53 | |||
57 | -- Changed files for each commit, which lets us dynamically build |
|
54 | -- Changed files for each commit, which lets us dynamically build | |
58 | -- filelogs. |
|
55 | -- filelogs. | |
59 | CREATE TABLE changedfiles ( |
|
56 | CREATE TABLE changedfiles ( | |
60 | node TEXT NOT NULL, |
|
57 | node TEXT NOT NULL, | |
61 | filename TEXT NOT NULL, |
|
58 | filename TEXT NOT NULL, | |
62 | -- 40 zeroes for deletions |
|
59 | -- 40 zeroes for deletions | |
63 | filenode TEXT NOT NULL, |
|
60 | filenode TEXT NOT NULL, | |
64 | -- to handle filelog parentage: |
|
61 | -- to handle filelog parentage: | |
65 | p1node TEXT, |
|
62 | p1node TEXT, | |
66 | p1filenode TEXT, |
|
63 | p1filenode TEXT, | |
67 | p2node TEXT, |
|
64 | p2node TEXT, | |
68 | p2filenode TEXT |
|
65 | p2filenode TEXT | |
69 | ); |
|
66 | ); | |
70 |
|
67 | |||
71 | CREATE INDEX changedfiles_nodes_idx |
|
68 | CREATE INDEX changedfiles_nodes_idx | |
72 | ON changedfiles(node); |
|
69 | ON changedfiles(node); | |
73 |
|
70 | |||
74 | PRAGMA user_version=%d |
|
71 | PRAGMA user_version=%d | |
75 | """ |
|
72 | """ | |
76 | % _CURRENT_SCHEMA_VERSION |
|
73 | % _CURRENT_SCHEMA_VERSION | |
77 | ) |
|
74 | ) | |
78 |
|
75 | |||
79 |
|
76 | |||
80 | def _createdb(path): |
|
77 | def _createdb(path): | |
81 | # print('open db', path) |
|
78 | # print('open db', path) | |
82 | # import traceback |
|
79 | # import traceback | |
83 | # traceback.print_stack() |
|
80 | # traceback.print_stack() | |
84 | db = sqlite3.connect(encoding.strfromlocal(path)) |
|
81 | db = sqlite3.connect(encoding.strfromlocal(path)) | |
85 | db.text_factory = bytes |
|
82 | db.text_factory = bytes | |
86 |
|
83 | |||
87 | res = db.execute('PRAGMA user_version').fetchone()[0] |
|
84 | res = db.execute('PRAGMA user_version').fetchone()[0] | |
88 |
|
85 | |||
89 | # New database. |
|
86 | # New database. | |
90 | if res == 0: |
|
87 | if res == 0: | |
91 | for statement in _SCHEMA.split(';'): |
|
88 | for statement in _SCHEMA.split(';'): | |
92 | db.execute(statement.strip()) |
|
89 | db.execute(statement.strip()) | |
93 |
|
90 | |||
94 | db.commit() |
|
91 | db.commit() | |
95 |
|
92 | |||
96 | elif res == _CURRENT_SCHEMA_VERSION: |
|
93 | elif res == _CURRENT_SCHEMA_VERSION: | |
97 | pass |
|
94 | pass | |
98 |
|
95 | |||
99 | else: |
|
96 | else: | |
100 | raise error.Abort(_(b'sqlite database has unrecognized version')) |
|
97 | raise error.Abort(_(b'sqlite database has unrecognized version')) | |
101 |
|
98 | |||
102 | db.execute('PRAGMA journal_mode=WAL') |
|
99 | db.execute('PRAGMA journal_mode=WAL') | |
103 |
|
100 | |||
104 | return db |
|
101 | return db | |
105 |
|
102 | |||
106 |
|
103 | |||
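_createdb() applies the schema by splitting it on ';' and relies on SQLite's user_version pragma as a cheap version stamp: 0 means a fresh database, the current constant means reuse, anything else aborts. That dance in miniature, with a one-table schema:

    import sqlite3

    SCHEMA_VERSION = 1
    SCHEMA = '''
    CREATE TABLE heads (node TEXT NOT NULL);
    PRAGMA user_version=%d
    ''' % SCHEMA_VERSION

    db = sqlite3.connect(':memory:')
    version = db.execute('PRAGMA user_version').fetchone()[0]
    if version == 0:  # brand-new database
        for statement in SCHEMA.split(';'):
            db.execute(statement.strip())
        db.commit()
    elif version != SCHEMA_VERSION:
        raise RuntimeError('sqlite database has unrecognized version')
    assert db.execute('PRAGMA user_version').fetchone()[0] == SCHEMA_VERSION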
107 | _OUR_ORDER = () |
|
104 | _OUR_ORDER = () | |
108 | if pygit2: |
|
105 | if pygit2: | |
109 | _OUR_ORDER = ( |
|
106 | _OUR_ORDER = ( | |
110 | pygit2.GIT_SORT_TOPOLOGICAL |
|
107 | pygit2.GIT_SORT_TOPOLOGICAL | |
111 | | pygit2.GIT_SORT_TIME |
|
108 | | pygit2.GIT_SORT_TIME | |
112 | | pygit2.GIT_SORT_REVERSE |
|
109 | | pygit2.GIT_SORT_REVERSE | |
113 | ) |
|
110 | ) | |
114 |
|
111 | |||
115 | _DIFF_FLAGS = 1 << 21 # GIT_DIFF_FORCE_BINARY, which isn't exposed by pygit2 |
|
112 | _DIFF_FLAGS = 1 << 21 # GIT_DIFF_FORCE_BINARY, which isn't exposed by pygit2 | |
116 |
|
113 | |||
117 |
|
114 | |||
118 | def _find_nearest_ancestor_introducing_node( |
|
115 | def _find_nearest_ancestor_introducing_node( | |
119 | db, gitrepo, file_path, walk_start, filenode |
|
116 | db, gitrepo, file_path, walk_start, filenode | |
120 | ): |
|
117 | ): | |
121 | """Find the nearest ancestor that introduces a file node. |
|
118 | """Find the nearest ancestor that introduces a file node. | |
122 |
|
119 | |||
123 | Args: |
|
120 | Args: | |
124 | db: a handle to our sqlite database. |
|
121 | db: a handle to our sqlite database. | |
125 | gitrepo: A pygit2.Repository instance. |
|
122 | gitrepo: A pygit2.Repository instance. | |
126 | file_path: the path of a file in the repo |
|
123 | file_path: the path of a file in the repo | |
127 | walk_start: a pygit2.Oid that is a commit where we should start walking |
|
124 | walk_start: a pygit2.Oid that is a commit where we should start walking | |
128 | for our nearest ancestor. |
|
125 | for our nearest ancestor. | |
129 |
|
126 | |||
130 | Returns: |
|
127 | Returns: | |
131 | A hexlified SHA that is the commit ID of the next-nearest parent. |
|
128 | A hexlified SHA that is the commit ID of the next-nearest parent. | |
132 | """ |
|
129 | """ | |
133 | assert isinstance(file_path, str), 'file_path must be str, got %r' % type( |
|
130 | assert isinstance(file_path, str), 'file_path must be str, got %r' % type( | |
134 | file_path |
|
131 | file_path | |
135 | ) |
|
132 | ) | |
136 | assert isinstance(filenode, str), 'filenode must be str, got %r' % type( |
|
133 | assert isinstance(filenode, str), 'filenode must be str, got %r' % type( | |
137 | filenode |
|
134 | filenode | |
138 | ) |
|
135 | ) | |
139 | parent_options = { |
|
136 | parent_options = { | |
140 | row[0].decode('ascii') |
|
137 | row[0].decode('ascii') | |
141 | for row in db.execute( |
|
138 | for row in db.execute( | |
142 | 'SELECT node FROM changedfiles ' |
|
139 | 'SELECT node FROM changedfiles ' | |
143 | 'WHERE filename = ? AND filenode = ?', |
|
140 | 'WHERE filename = ? AND filenode = ?', | |
144 | (file_path, filenode), |
|
141 | (file_path, filenode), | |
145 | ) |
|
142 | ) | |
146 | } |
|
143 | } | |
147 | inner_walker = gitrepo.walk(walk_start, _OUR_ORDER) |
|
144 | inner_walker = gitrepo.walk(walk_start, _OUR_ORDER) | |
148 | for w in inner_walker: |
|
145 | for w in inner_walker: | |
149 | if w.id.hex in parent_options: |
|
146 | if w.id.hex in parent_options: | |
150 | return w.id.hex |
|
147 | return w.id.hex | |
151 | raise error.ProgrammingError( |
|
148 | raise error.ProgrammingError( | |
152 | 'Unable to find introducing commit for %s node %s from %s', |
|
149 | 'Unable to find introducing commit for %s node %s from %s', | |
153 | (file_path, filenode, walk_start), |
|
150 | (file_path, filenode, walk_start), | |
154 | ) |
|
151 | ) | |
155 |
|
152 | |||
156 |
|
153 | |||
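_find_nearest_ancestor_introducing_node() first collects, from the SQL index, every commit known to carry the wanted filenode, then walks ancestors from walk_start in topological order and returns the first hit. The same search over a toy parent-pointer graph (pygit2's walker replaced by a plain queue; all data illustrative):

    from collections import deque

    parents = {'c3': ['c2'], 'c2': ['c1'], 'c1': []}
    carriers = {'c1'}  # commits whose tree contains the filenode

    def nearest_introducer(start):
        queue = deque([start])
        while queue:
            commit = queue.popleft()
            if commit in carriers:
                return commit
            queue.extend(parents[commit])
        raise LookupError('no introducing commit found')

    assert nearest_introducer('c3') == 'c1'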
157 | def fill_in_filelog(gitrepo, db, startcommit, path, startfilenode): |
|
154 | def fill_in_filelog(gitrepo, db, startcommit, path, startfilenode): | |
158 | """Given a starting commit and path, fill in a filelog's parent pointers. |
|
155 | """Given a starting commit and path, fill in a filelog's parent pointers. | |
159 |
|
156 | |||
160 | Args: |
|
157 | Args: | |
161 | gitrepo: a pygit2.Repository |
|
158 | gitrepo: a pygit2.Repository | |
162 | db: a handle to our sqlite database |
|
159 | db: a handle to our sqlite database | |
163 | startcommit: a hexlified node id for the commit to start at |
|
160 | startcommit: a hexlified node id for the commit to start at | |
164 | path: the path of the file whose parent pointers we should fill in. |
|
161 | path: the path of the file whose parent pointers we should fill in. | |
165 | filenode: the hexlified node id of the file at startcommit |
|
162 | filenode: the hexlified node id of the file at startcommit | |
166 |
|
163 | |||
167 | TODO: make filenode optional |
|
164 | TODO: make filenode optional | |
168 | """ |
|
165 | """ | |
169 | assert isinstance( |
|
166 | assert isinstance( | |
170 | startcommit, str |
|
167 | startcommit, str | |
171 | ), 'startcommit must be str, got %r' % type(startcommit) |
|
168 | ), 'startcommit must be str, got %r' % type(startcommit) | |
172 | assert isinstance( |
|
169 | assert isinstance( | |
173 | startfilenode, str |
|
170 | startfilenode, str | |
174 | ), 'startfilenode must be str, got %r' % type(startfilenode) |
|
171 | ), 'startfilenode must be str, got %r' % type(startfilenode) | |
175 | visit = collections.deque([(startcommit, startfilenode)]) |
|
172 | visit = collections.deque([(startcommit, startfilenode)]) | |
176 | while visit: |
|
173 | while visit: | |
177 | cnode, filenode = visit.popleft() |
|
174 | cnode, filenode = visit.popleft() | |
178 | commit = gitrepo[cnode] |
|
175 | commit = gitrepo[cnode] | |
179 | parents = [] |
|
176 | parents = [] | |
180 | for parent in commit.parents: |
|
177 | for parent in commit.parents: | |
181 | t = parent.tree |
|
178 | t = parent.tree | |
182 | for comp in path.split('/'): |
|
179 | for comp in path.split('/'): | |
183 | try: |
|
180 | try: | |
184 | t = gitrepo[t[comp].id] |
|
181 | t = gitrepo[t[comp].id] | |
185 | except KeyError: |
|
182 | except KeyError: | |
186 | break |
|
183 | break | |
187 | else: |
|
184 | else: | |
188 | introducer = _find_nearest_ancestor_introducing_node( |
|
185 | introducer = _find_nearest_ancestor_introducing_node( | |
189 | db, gitrepo, path, parent.id, t.id.hex |
|
186 | db, gitrepo, path, parent.id, t.id.hex | |
190 | ) |
|
187 | ) | |
191 | parents.append((introducer, t.id.hex)) |
|
188 | parents.append((introducer, t.id.hex)) | |
192 | p1node = p1fnode = p2node = p2fnode = gitutil.nullgit |
|
189 | p1node = p1fnode = p2node = p2fnode = gitutil.nullgit | |
193 | for par, parfnode in parents: |
|
190 | for par, parfnode in parents: | |
194 | found = int( |
|
191 | found = int( | |
195 | db.execute( |
|
192 | db.execute( | |
196 | 'SELECT COUNT(*) FROM changedfiles WHERE ' |
|
193 | 'SELECT COUNT(*) FROM changedfiles WHERE ' | |
197 | 'node = ? AND filename = ? AND filenode = ? AND ' |
|
194 | 'node = ? AND filename = ? AND filenode = ? AND ' | |
198 | 'p1node NOT NULL', |
|
195 | 'p1node NOT NULL', | |
199 | (par, path, parfnode), |
|
196 | (par, path, parfnode), | |
200 | ).fetchone()[0] |
|
197 | ).fetchone()[0] | |
201 | ) |
|
198 | ) | |
202 | if found == 0: |
|
199 | if found == 0: | |
203 | assert par is not None |
|
200 | assert par is not None | |
204 | visit.append((par, parfnode)) |
|
201 | visit.append((par, parfnode)) | |
205 | if parents: |
|
202 | if parents: | |
206 | p1node, p1fnode = parents[0] |
|
203 | p1node, p1fnode = parents[0] | |
207 | if len(parents) == 2: |
|
204 | if len(parents) == 2: | |
208 | p2node, p2fnode = parents[1] |
|
205 | p2node, p2fnode = parents[1] | |
209 | if len(parents) > 2: |
|
206 | if len(parents) > 2: | |
210 | raise error.ProgrammingError( |
|
207 | raise error.ProgrammingError( | |
211 | b"git support can't handle octopus merges" |
|
208 | b"git support can't handle octopus merges" | |
212 | ) |
|
209 | ) | |
213 | db.execute( |
|
210 | db.execute( | |
214 | 'UPDATE changedfiles SET ' |
|
211 | 'UPDATE changedfiles SET ' | |
215 | 'p1node = ?, p1filenode = ?, p2node = ?, p2filenode = ? ' |
|
212 | 'p1node = ?, p1filenode = ?, p2node = ?, p2filenode = ? ' | |
216 | 'WHERE node = ? AND filename = ? AND filenode = ?', |
|
213 | 'WHERE node = ? AND filename = ? AND filenode = ?', | |
217 | (p1node, p1fnode, p2node, p2fnode, commit.id.hex, path, filenode), |
|
214 | (p1node, p1fnode, p2node, p2fnode, commit.id.hex, path, filenode), | |
218 | ) |
|
215 | ) | |
219 | db.commit() |
|
216 | db.commit() | |
220 |
|
217 | |||
221 |
|
218 | |||
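A detail worth isolating from fill_in_filelog(): the parent list is flattened into fixed p1/p2 columns, absent parents are stored as the all-zeros git null id, and anything beyond two parents (an octopus merge) is rejected outright. Roughly:

    NULL = '0' * 40  # like gitutil.nullgit

    def parent_columns(parents):
        p1node = p1fnode = p2node = p2fnode = NULL
        if len(parents) > 2:
            raise ValueError("git support can't handle octopus merges")
        if parents:
            p1node, p1fnode = parents[0]
            if len(parents) == 2:
                p2node, p2fnode = parents[1]
        return p1node, p1fnode, p2node, p2fnode

    assert parent_columns([]) == (NULL, NULL, NULL, NULL)
    assert parent_columns([('c1', 'f1')])[:2] == ('c1', 'f1')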
222 | def _index_repo( |
|
219 | def _index_repo( | |
223 | gitrepo, |
|
220 | gitrepo, | |
224 | db, |
|
221 | db, | |
225 | logfn=lambda x: None, |
|
222 | logfn=lambda x: None, | |
226 | progress_factory=lambda *args, **kwargs: None, |
|
223 | progress_factory=lambda *args, **kwargs: None, | |
227 | ): |
|
224 | ): | |
228 | # Identify all references so we can tell the walker to visit all of them. |
|
225 | # Identify all references so we can tell the walker to visit all of them. | |
229 | all_refs = gitrepo.listall_references() |
|
226 | all_refs = gitrepo.listall_references() | |
230 | possible_heads = set() |
|
227 | possible_heads = set() | |
231 | prog = progress_factory(b'refs') |
|
228 | prog = progress_factory(b'refs') | |
232 | for pos, ref in enumerate(all_refs): |
|
229 | for pos, ref in enumerate(all_refs): | |
233 | if prog is not None: |
|
230 | if prog is not None: | |
234 | prog.update(pos) |
|
231 | prog.update(pos) | |
235 | if not ( |
|
232 | if not ( | |
236 | ref.startswith('refs/heads/') # local branch |
|
233 | ref.startswith('refs/heads/') # local branch | |
237 | or ref.startswith('refs/tags/') # tag |
|
234 | or ref.startswith('refs/tags/') # tag | |
238 | or ref.startswith('refs/remotes/') # remote branch |
|
235 | or ref.startswith('refs/remotes/') # remote branch | |
239 | or ref.startswith('refs/hg/') # from this extension |
|
236 | or ref.startswith('refs/hg/') # from this extension | |
240 | ): |
|
237 | ): | |
241 | continue |
|
238 | continue | |
242 | try: |
|
239 | try: | |
243 | start = gitrepo.lookup_reference(ref).peel(pygit2.GIT_OBJ_COMMIT) |
|
240 | start = gitrepo.lookup_reference(ref).peel(pygit2.GIT_OBJ_COMMIT) | |
244 | except ValueError: |
|
241 | except ValueError: | |
245 | # No commit to be found, so we don't care for hg's purposes. |
|
242 | # No commit to be found, so we don't care for hg's purposes. | |
246 | continue |
|
243 | continue | |
247 | possible_heads.add(start.id) |
|
244 | possible_heads.add(start.id) | |
248 | # Optimization: if the list of heads hasn't changed, don't |
|
245 | # Optimization: if the list of heads hasn't changed, don't | |
249 | # reindex the changelog. This doesn't matter on small |
|
246 | # reindex the changelog. This doesn't matter on small |
250 | # repositories, but on even moderately deep histories (eg cpython) |
|
247 | # repositories, but on even moderately deep histories (eg cpython) | |
251 | # this is a very important performance win. |
|
248 | # this is a very important performance win. | |
252 | # |
|
249 | # | |
253 | # TODO: we should figure out how to incrementally index history |
|
250 | # TODO: we should figure out how to incrementally index history | |
254 | # (preferably by detecting rewinds!) so that we don't have to do a |
|
251 | # (preferably by detecting rewinds!) so that we don't have to do a | |
255 | # full changelog walk every time a new commit is created. |
|
252 | # full changelog walk every time a new commit is created. | |
256 | cache_heads = { |
|
253 | cache_heads = { | |
257 | pycompat.sysstr(x[0]) |
|
254 | pycompat.sysstr(x[0]) | |
258 | for x in db.execute('SELECT node FROM possible_heads') |
|
255 | for x in db.execute('SELECT node FROM possible_heads') | |
259 | } |
|
256 | } | |
260 | walker = None |
|
257 | walker = None | |
261 | cur_cache_heads = {h.hex for h in possible_heads} |
|
258 | cur_cache_heads = {h.hex for h in possible_heads} | |
262 | if cur_cache_heads == cache_heads: |
|
259 | if cur_cache_heads == cache_heads: | |
263 | return |
|
260 | return | |
264 | logfn(b'heads mismatch, rebuilding dagcache\n') |
|
261 | logfn(b'heads mismatch, rebuilding dagcache\n') | |
265 | for start in possible_heads: |
|
262 | for start in possible_heads: | |
266 | if walker is None: |
|
263 | if walker is None: | |
267 | walker = gitrepo.walk(start, _OUR_ORDER) |
|
264 | walker = gitrepo.walk(start, _OUR_ORDER) | |
268 | else: |
|
265 | else: | |
269 | walker.push(start) |
|
266 | walker.push(start) | |
270 |
|
267 | |||
271 | # Empty out the existing changelog. Even for large-ish histories |
|
268 | # Empty out the existing changelog. Even for large-ish histories | |
272 | # we can do the top-level "walk all the commits" dance very |
|
269 | # we can do the top-level "walk all the commits" dance very | |
273 | # quickly as long as we don't need to figure out the changed files |
|
270 | # quickly as long as we don't need to figure out the changed files | |
274 | # list. |
|
271 | # list. | |
275 | db.execute('DELETE FROM changelog') |
|
272 | db.execute('DELETE FROM changelog') | |
276 | if prog is not None: |
|
273 | if prog is not None: | |
277 | prog.complete() |
|
274 | prog.complete() | |
278 | prog = progress_factory(b'commits') |
|
275 | prog = progress_factory(b'commits') | |
279 | # This walker is sure to visit all the revisions in history, but |
|
276 | # This walker is sure to visit all the revisions in history, but | |
280 | # only once. |
|
277 | # only once. | |
281 | for pos, commit in enumerate(walker): |
|
278 | for pos, commit in enumerate(walker): | |
282 | if prog is not None: |
|
279 | if prog is not None: | |
283 | prog.update(pos) |
|
280 | prog.update(pos) | |
284 | p1 = p2 = nullhex |
|
281 | p1 = p2 = sha1nodeconstants.nullhex | |
285 | if len(commit.parents) > 2: |
|
282 | if len(commit.parents) > 2: | |
286 | raise error.ProgrammingError( |
|
283 | raise error.ProgrammingError( | |
287 | ( |
|
284 | ( | |
288 | b"git support can't handle octopus merges, " |
|
285 | b"git support can't handle octopus merges, " | |
289 | b"found a commit with %d parents :(" |
|
286 | b"found a commit with %d parents :(" | |
290 | ) |
|
287 | ) | |
291 | % len(commit.parents) |
|
288 | % len(commit.parents) | |
292 | ) |
|
289 | ) | |
293 | if commit.parents: |
|
290 | if commit.parents: | |
294 | p1 = commit.parents[0].id.hex |
|
291 | p1 = commit.parents[0].id.hex | |
295 | if len(commit.parents) == 2: |
|
292 | if len(commit.parents) == 2: | |
296 | p2 = commit.parents[1].id.hex |
|
293 | p2 = commit.parents[1].id.hex | |
297 | db.execute( |
|
294 | db.execute( | |
298 | 'INSERT INTO changelog (rev, node, p1, p2) VALUES(?, ?, ?, ?)', |
|
295 | 'INSERT INTO changelog (rev, node, p1, p2) VALUES(?, ?, ?, ?)', | |
299 | (pos, commit.id.hex, p1, p2), |
|
296 | (pos, commit.id.hex, p1, p2), | |
300 | ) |
|
297 | ) | |
301 |
|
298 | |||
302 | num_changedfiles = db.execute( |
|
299 | num_changedfiles = db.execute( | |
303 | "SELECT COUNT(*) from changedfiles WHERE node = ?", |
|
300 | "SELECT COUNT(*) from changedfiles WHERE node = ?", | |
304 | (commit.id.hex,), |
|
301 | (commit.id.hex,), | |
305 | ).fetchone()[0] |
|
302 | ).fetchone()[0] | |
306 | if not num_changedfiles: |
|
303 | if not num_changedfiles: | |
307 | files = {} |
|
304 | files = {} | |
308 | # I *think* we only need to check p1 for changed files |
|
305 | # I *think* we only need to check p1 for changed files | |
309 | # (and therefore linkrevs), because any node that would |
|
306 | # (and therefore linkrevs), because any node that would | |
310 | # actually have this commit as a linkrev would be |
|
307 | # actually have this commit as a linkrev would be | |
311 | # completely new in this rev. |
|
308 | # completely new in this rev. | |
312 | p1 = commit.parents[0].id.hex if commit.parents else None |
|
309 | p1 = commit.parents[0].id.hex if commit.parents else None | |
313 | if p1 is not None: |
|
310 | if p1 is not None: | |
314 | patchgen = gitrepo.diff(p1, commit.id.hex, flags=_DIFF_FLAGS) |
|
311 | patchgen = gitrepo.diff(p1, commit.id.hex, flags=_DIFF_FLAGS) | |
315 | else: |
|
312 | else: | |
316 | patchgen = commit.tree.diff_to_tree( |
|
313 | patchgen = commit.tree.diff_to_tree( | |
317 | swap=True, flags=_DIFF_FLAGS |
|
314 | swap=True, flags=_DIFF_FLAGS | |
318 | ) |
|
315 | ) | |
319 | new_files = (p.delta.new_file for p in patchgen) |
|
316 | new_files = (p.delta.new_file for p in patchgen) | |
320 | files = { |
|
317 | files = { | |
321 | nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid |
|
318 | nf.path: nf.id.hex |
|
319 | for nf in new_files | |||
|
320 | if nf.id.raw != sha1nodeconstants.nullid | |||
322 | } |
|
321 | } | |
323 | for p, n in files.items(): |
|
322 | for p, n in files.items(): | |
324 | # We intentionally set NULLs for any file parentage |
|
323 | # We intentionally set NULLs for any file parentage | |
325 | # information so it'll get demand-computed later. We |
|
324 | # information so it'll get demand-computed later. We | |
326 | # used to do it right here, and it was _very_ slow. |
|
325 | # used to do it right here, and it was _very_ slow. | |
327 | db.execute( |
|
326 | db.execute( | |
328 | 'INSERT INTO changedfiles (' |
|
327 | 'INSERT INTO changedfiles (' | |
329 | 'node, filename, filenode, p1node, p1filenode, p2node, ' |
|
328 | 'node, filename, filenode, p1node, p1filenode, p2node, ' | |
330 | 'p2filenode) VALUES(?, ?, ?, ?, ?, ?, ?)', |
|
329 | 'p2filenode) VALUES(?, ?, ?, ?, ?, ?, ?)', | |
331 | (commit.id.hex, p, n, None, None, None, None), |
|
330 | (commit.id.hex, p, n, None, None, None, None), | |
332 | ) |
|
331 | ) | |
333 | db.execute('DELETE FROM heads') |
|
332 | db.execute('DELETE FROM heads') | |
334 | db.execute('DELETE FROM possible_heads') |
|
333 | db.execute('DELETE FROM possible_heads') | |
335 | for hid in possible_heads: |
|
334 | for hid in possible_heads: | |
336 | h = hid.hex |
|
335 | h = hid.hex | |
337 | db.execute('INSERT INTO possible_heads (node) VALUES(?)', (h,)) |
|
336 | db.execute('INSERT INTO possible_heads (node) VALUES(?)', (h,)) | |
338 | haschild = db.execute( |
|
337 | haschild = db.execute( | |
339 | 'SELECT COUNT(*) FROM changelog WHERE p1 = ? OR p2 = ?', (h, h) |
|
338 | 'SELECT COUNT(*) FROM changelog WHERE p1 = ? OR p2 = ?', (h, h) | |
340 | ).fetchone()[0] |
|
339 | ).fetchone()[0] | |
341 | if not haschild: |
|
340 | if not haschild: | |
342 | db.execute('INSERT INTO heads (node) VALUES(?)', (h,)) |
|
341 | db.execute('INSERT INTO heads (node) VALUES(?)', (h,)) | |
343 |
|
342 | |||
344 | db.commit() |
|
343 | db.commit() | |
345 | if prog is not None: |
|
344 | if prog is not None: | |
346 | prog.complete() |
|
345 | prog.complete() | |
347 |
|
346 | |||
348 |
|
347 | |||
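The early return near the top of _index_repo() is the key performance trick: the set of possible heads is compared against the cached set, and only a mismatch triggers the full changelog walk. Reduced to its essentials (reindex stands in for the walk):

    def maybe_reindex(cached_heads, current_heads, reindex):
        if set(current_heads) == set(cached_heads):
            return False  # same heads, same DAG: skip the expensive walk
        reindex()
        return True

    calls = []
    assert not maybe_reindex({'a', 'b'}, {'b', 'a'}, lambda: calls.append(1))
    assert maybe_reindex({'a'}, {'a', 'c'}, lambda: calls.append(1))
    assert calls == [1]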
349 | def get_index( |
|
348 | def get_index( | |
350 | gitrepo, logfn=lambda x: None, progress_factory=lambda *args, **kwargs: None |
|
349 | gitrepo, logfn=lambda x: None, progress_factory=lambda *args, **kwargs: None | |
351 | ): |
|
350 | ): | |
352 | cachepath = os.path.join( |
|
351 | cachepath = os.path.join( | |
353 | pycompat.fsencode(gitrepo.path), b'..', b'.hg', b'cache' |
|
352 | pycompat.fsencode(gitrepo.path), b'..', b'.hg', b'cache' | |
354 | ) |
|
353 | ) | |
355 | if not os.path.exists(cachepath): |
|
354 | if not os.path.exists(cachepath): | |
356 | os.makedirs(cachepath) |
|
355 | os.makedirs(cachepath) | |
357 | dbpath = os.path.join(cachepath, b'git-commits.sqlite') |
|
356 | dbpath = os.path.join(cachepath, b'git-commits.sqlite') | |
358 | db = _createdb(dbpath) |
|
357 | db = _createdb(dbpath) | |
359 | # TODO check against gitrepo heads before doing a full index |
|
358 | # TODO check against gitrepo heads before doing a full index | |
360 | # TODO thread a ui.progress call into this layer |
|
359 | # TODO thread a ui.progress call into this layer | |
361 | _index_repo(gitrepo, db, logfn, progress_factory) |
|
360 | _index_repo(gitrepo, db, logfn, progress_factory) | |
362 | return db |
|
361 | return db |
@@ -1,390 +1,391 b'' | |||||
1 | # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org> |
|
1 | # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org> | |
2 | # |
|
2 | # | |
3 | # This software may be used and distributed according to the terms of the |
|
3 | # This software may be used and distributed according to the terms of the | |
4 | # GNU General Public License version 2 or any later version. |
|
4 | # GNU General Public License version 2 or any later version. | |
5 |
|
5 | |||
6 | '''commands to sign and verify changesets''' |
|
6 | '''commands to sign and verify changesets''' | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import binascii |
|
10 | import binascii | |
11 | import os |
|
11 | import os | |
12 |
|
12 | |||
13 | from mercurial.i18n import _ |
|
13 | from mercurial.i18n import _ | |
14 | from mercurial.node import ( |
|
14 | from mercurial.node import ( | |
15 | bin, |
|
15 | bin, | |
16 | hex, |
|
16 | hex, | |
17 | nullid, |
|
|||
18 | short, |
|
17 | short, | |
19 | ) |
|
18 | ) | |
20 | from mercurial import ( |
|
19 | from mercurial import ( | |
21 | cmdutil, |
|
20 | cmdutil, | |
22 | error, |
|
21 | error, | |
23 | help, |
|
22 | help, | |
24 | match, |
|
23 | match, | |
25 | pycompat, |
|
24 | pycompat, | |
26 | registrar, |
|
25 | registrar, | |
27 | ) |
|
26 | ) | |
28 | from mercurial.utils import ( |
|
27 | from mercurial.utils import ( | |
29 | dateutil, |
|
28 | dateutil, | |
30 | procutil, |
|
29 | procutil, | |
31 | ) |
|
30 | ) | |
32 |
|
31 | |||
33 | cmdtable = {} |
|
32 | cmdtable = {} | |
34 | command = registrar.command(cmdtable) |
|
33 | command = registrar.command(cmdtable) | |
35 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
34 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
36 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
35 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
37 | # be specifying the version(s) of Mercurial they are tested with, or |
|
36 | # be specifying the version(s) of Mercurial they are tested with, or | |
38 | # leave the attribute unspecified. |
|
37 | # leave the attribute unspecified. | |
39 | testedwith = b'ships-with-hg-core' |
|
38 | testedwith = b'ships-with-hg-core' | |
40 |
|
39 | |||
41 | configtable = {} |
|
40 | configtable = {} | |
42 | configitem = registrar.configitem(configtable) |
|
41 | configitem = registrar.configitem(configtable) | |
43 |
|
42 | |||
44 | configitem( |
|
43 | configitem( | |
45 | b'gpg', |
|
44 | b'gpg', | |
46 | b'cmd', |
|
45 | b'cmd', | |
47 | default=b'gpg', |
|
46 | default=b'gpg', | |
48 | ) |
|
47 | ) | |
49 | configitem( |
|
48 | configitem( | |
50 | b'gpg', |
|
49 | b'gpg', | |
51 | b'key', |
|
50 | b'key', | |
52 | default=None, |
|
51 | default=None, | |
53 | ) |
|
52 | ) | |
54 | configitem( |
|
53 | configitem( | |
55 | b'gpg', |
|
54 | b'gpg', | |
56 | b'.*', |
|
55 | b'.*', | |
57 | default=None, |
|
56 | default=None, | |
58 | generic=True, |
|
57 | generic=True, | |
59 | ) |
|
58 | ) | |
60 |
|
59 | |||
61 | # Custom help category |
|
60 | # Custom help category | |
62 | _HELP_CATEGORY = b'gpg' |
|
61 | _HELP_CATEGORY = b'gpg' | |
63 | help.CATEGORY_ORDER.insert( |
|
62 | help.CATEGORY_ORDER.insert( | |
64 | help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP), _HELP_CATEGORY |
|
63 | help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP), _HELP_CATEGORY | |
65 | ) |
|
64 | ) | |
66 | help.CATEGORY_NAMES[_HELP_CATEGORY] = b'Signing changes (GPG)' |
|
65 | help.CATEGORY_NAMES[_HELP_CATEGORY] = b'Signing changes (GPG)' | |
67 |
|
66 | |||
68 |
|
67 | |||
69 | class gpg(object): |
|
68 | class gpg(object): | |
70 | def __init__(self, path, key=None): |
|
69 | def __init__(self, path, key=None): | |
71 | self.path = path |
|
70 | self.path = path | |
72 | self.key = (key and b" --local-user \"%s\"" % key) or b"" |
|
71 | self.key = (key and b" --local-user \"%s\"" % key) or b"" | |
73 |
|
72 | |||
74 | def sign(self, data): |
|
73 | def sign(self, data): | |
75 | gpgcmd = b"%s --sign --detach-sign%s" % (self.path, self.key) |
|
74 | gpgcmd = b"%s --sign --detach-sign%s" % (self.path, self.key) | |
76 | return procutil.filter(data, gpgcmd) |
|
75 | return procutil.filter(data, gpgcmd) | |
77 |
|
76 | |||
78 | def verify(self, data, sig): |
|
77 | def verify(self, data, sig): | |
79 | """return the good and bad signatures""" |
|
78 | """return the good and bad signatures""" |
80 | sigfile = datafile = None |
|
79 | sigfile = datafile = None | |
81 | try: |
|
80 | try: | |
82 | # create temporary files |
|
81 | # create temporary files | |
83 | fd, sigfile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".sig") |
|
82 | fd, sigfile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".sig") | |
84 | fp = os.fdopen(fd, 'wb') |
|
83 | fp = os.fdopen(fd, 'wb') | |
85 | fp.write(sig) |
|
84 | fp.write(sig) | |
86 | fp.close() |
|
85 | fp.close() | |
87 | fd, datafile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".txt") |
|
86 | fd, datafile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".txt") | |
88 | fp = os.fdopen(fd, 'wb') |
|
87 | fp = os.fdopen(fd, 'wb') | |
89 | fp.write(data) |
|
88 | fp.write(data) | |
90 | fp.close() |
|
89 | fp.close() | |
91 | gpgcmd = ( |
|
90 | gpgcmd = ( | |
92 | b"%s --logger-fd 1 --status-fd 1 --verify \"%s\" \"%s\"" |
|
91 | b"%s --logger-fd 1 --status-fd 1 --verify \"%s\" \"%s\"" | |
93 | % ( |
|
92 | % ( | |
94 | self.path, |
|
93 | self.path, | |
95 | sigfile, |
|
94 | sigfile, | |
96 | datafile, |
|
95 | datafile, | |
97 | ) |
|
96 | ) | |
98 | ) |
|
97 | ) | |
99 | ret = procutil.filter(b"", gpgcmd) |
|
98 | ret = procutil.filter(b"", gpgcmd) | |
100 | finally: |
|
99 | finally: | |
101 | for f in (sigfile, datafile): |
|
100 | for f in (sigfile, datafile): | |
102 | try: |
|
101 | try: | |
103 | if f: |
|
102 | if f: | |
104 | os.unlink(f) |
|
103 | os.unlink(f) | |
105 | except OSError: |
|
104 | except OSError: | |
106 | pass |
|
105 | pass | |
107 | keys = [] |
|
106 | keys = [] | |
108 | key, fingerprint = None, None |
|
107 | key, fingerprint = None, None | |
109 | for l in ret.splitlines(): |
|
108 | for l in ret.splitlines(): | |
110 | # see DETAILS in the gnupg documentation |
|
109 | # see DETAILS in the gnupg documentation | |
111 | # filter the logger output |
|
110 | # filter the logger output | |
112 | if not l.startswith(b"[GNUPG:]"): |
|
111 | if not l.startswith(b"[GNUPG:]"): | |
113 | continue |
|
112 | continue | |
114 | l = l[9:] |
|
113 | l = l[9:] | |
115 | if l.startswith(b"VALIDSIG"): |
|
114 | if l.startswith(b"VALIDSIG"): | |
116 | # fingerprint of the primary key |
|
115 | # fingerprint of the primary key | |
117 | fingerprint = l.split()[10] |
|
116 | fingerprint = l.split()[10] | |
118 | elif l.startswith(b"ERRSIG"): |
|
117 | elif l.startswith(b"ERRSIG"): | |
119 | key = l.split(b" ", 3)[:2] |
|
118 | key = l.split(b" ", 3)[:2] | |
120 | key.append(b"") |
|
119 | key.append(b"") | |
121 | fingerprint = None |
|
120 | fingerprint = None | |
122 | elif ( |
|
121 | elif ( | |
123 | l.startswith(b"GOODSIG") |
|
122 | l.startswith(b"GOODSIG") | |
124 | or l.startswith(b"EXPSIG") |
|
123 | or l.startswith(b"EXPSIG") | |
125 | or l.startswith(b"EXPKEYSIG") |
|
124 | or l.startswith(b"EXPKEYSIG") | |
126 | or l.startswith(b"BADSIG") |
|
125 | or l.startswith(b"BADSIG") | |
127 | ): |
|
126 | ): | |
128 | if key is not None: |
|
127 | if key is not None: | |
129 | keys.append(key + [fingerprint]) |
|
128 | keys.append(key + [fingerprint]) | |
130 | key = l.split(b" ", 2) |
|
129 | key = l.split(b" ", 2) | |
131 | fingerprint = None |
|
130 | fingerprint = None | |
132 | if key is not None: |
|
131 | if key is not None: | |
133 | keys.append(key + [fingerprint]) |
|
132 | keys.append(key + [fingerprint]) | |
134 | return keys |
|
133 | return keys | |
135 |
|
134 | |||
136 |
|
135 | |||
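verify() above shells out with --status-fd 1 and scrapes gpg's machine-readable "[GNUPG:] ..." lines (documented in gnupg's DETAILS file). A reduced parser for just the GOODSIG/BADSIG cases, fed canned output rather than a live gpg process:

    def parse_status(output):
        keys = []
        for line in output.splitlines():
            if not line.startswith(b"[GNUPG:]"):
                continue  # drop logger chatter
            line = line[9:]
            if line.startswith(b"GOODSIG") or line.startswith(b"BADSIG"):
                keys.append(line.split(b" ", 2))
        return keys

    canned = b"gpg: chatter\n[GNUPG:] GOODSIG DEADBEEF Alice <a@example.com>\n"
    assert parse_status(canned) == [
        [b"GOODSIG", b"DEADBEEF", b"Alice <a@example.com>"]
    ]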
137 | def newgpg(ui, **opts): |
|
136 | def newgpg(ui, **opts): | |
138 | """create a new gpg instance""" |
|
137 | """create a new gpg instance""" | |
139 | gpgpath = ui.config(b"gpg", b"cmd") |
|
138 | gpgpath = ui.config(b"gpg", b"cmd") | |
140 | gpgkey = opts.get('key') |
|
139 | gpgkey = opts.get('key') | |
141 | if not gpgkey: |
|
140 | if not gpgkey: | |
142 | gpgkey = ui.config(b"gpg", b"key") |
|
141 | gpgkey = ui.config(b"gpg", b"key") | |
143 | return gpg(gpgpath, gpgkey) |
|
142 | return gpg(gpgpath, gpgkey) | |
144 |
|
143 | |||
145 |
|
144 | |||
146 | def sigwalk(repo): |
|
145 | def sigwalk(repo): | |
147 | """ |
|
146 | """ | |
148 | walk over every signature, yielding pairs |
|
147 | walk over every signature, yielding pairs |
149 | ((node, version, sig), (filename, linenumber)) |
|
148 | ((node, version, sig), (filename, linenumber)) | |
150 | """ |
|
149 | """ | |
151 |
|
150 | |||
152 | def parsefile(fileiter, context): |
|
151 | def parsefile(fileiter, context): | |
153 | ln = 1 |
|
152 | ln = 1 | |
154 | for l in fileiter: |
|
153 | for l in fileiter: | |
155 | if not l: |
|
154 | if not l: | |
156 | continue |
|
155 | continue | |
157 | yield (l.split(b" ", 2), (context, ln)) |
|
156 | yield (l.split(b" ", 2), (context, ln)) | |
158 | ln += 1 |
|
157 | ln += 1 | |
159 |
|
158 | |||
160 | # read the heads |
|
159 | # read the heads | |
161 | fl = repo.file(b".hgsigs") |
|
160 | fl = repo.file(b".hgsigs") | |
162 | for r in reversed(fl.heads()): |
|
161 | for r in reversed(fl.heads()): | |
163 | fn = b".hgsigs|%s" % short(r) |
|
162 | fn = b".hgsigs|%s" % short(r) | |
164 | for item in parsefile(fl.read(r).splitlines(), fn): |
|
163 | for item in parsefile(fl.read(r).splitlines(), fn): | |
165 | yield item |
|
164 | yield item | |
166 | try: |
|
165 | try: | |
167 | # read local signatures |
|
166 | # read local signatures | |
168 | fn = b"localsigs" |
|
167 | fn = b"localsigs" | |
169 | for item in parsefile(repo.vfs(fn), fn): |
|
168 | for item in parsefile(repo.vfs(fn), fn): | |
170 | yield item |
|
169 | yield item | |
171 | except IOError: |
|
170 | except IOError: | |
172 | pass |
|
171 | pass | |
173 |
|
172 | |||
174 |
|
173 | |||
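Each signature line, whether from a .hgsigs revision or from localsigs, has the shape "<hexnode> <version> <base64-signature>", and parsefile() just enumerates lines and splits on the first two spaces. The same parse, stand-alone with dummy data:

    def parse_sigs(lines, context):
        for ln, line in enumerate(lines, start=1):
            if not line:
                continue
            yield line.split(b" ", 2), (context, ln)

    line = b"ab" * 20 + b" 0 QUJD"  # fake node, version 0, base64 payload
    (sigdata, where), = list(parse_sigs([line], b"localsigs"))
    assert sigdata[1] == b"0" and where == (b"localsigs", 1)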
175 | def getkeys(ui, repo, mygpg, sigdata, context): |
|
174 | def getkeys(ui, repo, mygpg, sigdata, context): | |
176 | """get the keys that signed the data""" |
|
175 | """get the keys that signed the data""" |
177 | fn, ln = context |
|
176 | fn, ln = context | |
178 | node, version, sig = sigdata |
|
177 | node, version, sig = sigdata | |
179 | prefix = b"%s:%d" % (fn, ln) |
|
178 | prefix = b"%s:%d" % (fn, ln) | |
180 | node = bin(node) |
|
179 | node = bin(node) | |
181 |
|
180 | |||
182 | data = node2txt(repo, node, version) |
|
181 | data = node2txt(repo, node, version) | |
183 | sig = binascii.a2b_base64(sig) |
|
182 | sig = binascii.a2b_base64(sig) | |
184 | keys = mygpg.verify(data, sig) |
|
183 | keys = mygpg.verify(data, sig) | |
185 |
|
184 | |||
186 | validkeys = [] |
|
185 | validkeys = [] | |
187 | # warn for expired key and/or sigs |
|
186 | # warn for expired key and/or sigs | |
188 | for key in keys: |
|
187 | for key in keys: | |
189 | if key[0] == b"ERRSIG": |
|
188 | if key[0] == b"ERRSIG": | |
190 | ui.write(_(b"%s Unknown key ID \"%s\"\n") % (prefix, key[1])) |
|
189 | ui.write(_(b"%s Unknown key ID \"%s\"\n") % (prefix, key[1])) | |
191 | continue |
|
190 | continue | |
192 | if key[0] == b"BADSIG": |
|
191 | if key[0] == b"BADSIG": | |
193 | ui.write(_(b"%s Bad signature from \"%s\"\n") % (prefix, key[2])) |
|
192 | ui.write(_(b"%s Bad signature from \"%s\"\n") % (prefix, key[2])) | |
194 | continue |
|
193 | continue | |
195 | if key[0] == b"EXPSIG": |
|
194 | if key[0] == b"EXPSIG": | |
196 | ui.write( |
|
195 | ui.write( | |
197 | _(b"%s Note: Signature has expired (signed by: \"%s\")\n") |
|
196 | _(b"%s Note: Signature has expired (signed by: \"%s\")\n") | |
198 | % (prefix, key[2]) |
|
197 | % (prefix, key[2]) | |
199 | ) |
|
198 | ) | |
200 | elif key[0] == b"EXPKEYSIG": |
|
199 | elif key[0] == b"EXPKEYSIG": | |
201 | ui.write( |
|
200 | ui.write( | |
202 | _(b"%s Note: This key has expired (signed by: \"%s\")\n") |
|
201 | _(b"%s Note: This key has expired (signed by: \"%s\")\n") | |
203 | % (prefix, key[2]) |
|
202 | % (prefix, key[2]) | |
204 | ) |
|
203 | ) | |
205 | validkeys.append((key[1], key[2], key[3])) |
|
204 | validkeys.append((key[1], key[2], key[3])) | |
206 | return validkeys |
|
205 | return validkeys | |
207 |
|
206 | |||
208 |
|
207 | |||
209 | @command(b"sigs", [], _(b'hg sigs'), helpcategory=_HELP_CATEGORY) |
|
208 | @command(b"sigs", [], _(b'hg sigs'), helpcategory=_HELP_CATEGORY) | |
210 | def sigs(ui, repo): |
|
209 | def sigs(ui, repo): | |
211 | """list signed changesets""" |
|
210 | """list signed changesets""" | |
212 | mygpg = newgpg(ui) |
|
211 | mygpg = newgpg(ui) | |
213 | revs = {} |
|
212 | revs = {} | |
214 |
|
213 | |||
215 | for data, context in sigwalk(repo): |
|
214 | for data, context in sigwalk(repo): | |
216 | node, version, sig = data |
|
215 | node, version, sig = data | |
217 | fn, ln = context |
|
216 | fn, ln = context | |
218 | try: |
|
217 | try: | |
219 | n = repo.lookup(node) |
|
218 | n = repo.lookup(node) | |
220 | except KeyError: |
|
219 | except KeyError: | |
221 | ui.warn(_(b"%s:%d node does not exist\n") % (fn, ln)) |
|
220 | ui.warn(_(b"%s:%d node does not exist\n") % (fn, ln)) | |
222 | continue |
|
221 | continue | |
223 | r = repo.changelog.rev(n) |
|
222 | r = repo.changelog.rev(n) | |
224 | keys = getkeys(ui, repo, mygpg, data, context) |
|
223 | keys = getkeys(ui, repo, mygpg, data, context) | |
225 | if not keys: |
|
224 | if not keys: | |
226 | continue |
|
225 | continue | |
227 | revs.setdefault(r, []) |
|
226 | revs.setdefault(r, []) | |
228 | revs[r].extend(keys) |
|
227 | revs[r].extend(keys) | |
229 | for rev in sorted(revs, reverse=True): |
|
228 | for rev in sorted(revs, reverse=True): | |
230 | for k in revs[rev]: |
|
229 | for k in revs[rev]: | |
231 | r = b"%5d:%s" % (rev, hex(repo.changelog.node(rev))) |
|
230 | r = b"%5d:%s" % (rev, hex(repo.changelog.node(rev))) | |
232 | ui.write(b"%-30s %s\n" % (keystr(ui, k), r)) |
|
231 | ui.write(b"%-30s %s\n" % (keystr(ui, k), r)) | |
233 |
|
232 | |||
234 |
|
233 | |||
235 | @command(b"sigcheck", [], _(b'hg sigcheck REV'), helpcategory=_HELP_CATEGORY) |
|
234 | @command(b"sigcheck", [], _(b'hg sigcheck REV'), helpcategory=_HELP_CATEGORY) | |
236 | def sigcheck(ui, repo, rev): |
|
235 | def sigcheck(ui, repo, rev): | |
237 | """verify all the signatures there may be for a particular revision""" |
|
236 | """verify all the signatures there may be for a particular revision""" | |
238 | mygpg = newgpg(ui) |
|
237 | mygpg = newgpg(ui) | |
239 | rev = repo.lookup(rev) |
|
238 | rev = repo.lookup(rev) | |
240 | hexrev = hex(rev) |
|
239 | hexrev = hex(rev) | |
241 | keys = [] |
|
240 | keys = [] | |
242 |
|
241 | |||
243 | for data, context in sigwalk(repo): |
|
242 | for data, context in sigwalk(repo): | |
244 | node, version, sig = data |
|
243 | node, version, sig = data | |
245 | if node == hexrev: |
|
244 | if node == hexrev: | |
246 | k = getkeys(ui, repo, mygpg, data, context) |
|
245 | k = getkeys(ui, repo, mygpg, data, context) | |
247 | if k: |
|
246 | if k: | |
248 | keys.extend(k) |
|
247 | keys.extend(k) | |
249 |
|
248 | |||
250 | if not keys: |
|
249 | if not keys: | |
251 | ui.write(_(b"no valid signature for %s\n") % short(rev)) |
|
250 | ui.write(_(b"no valid signature for %s\n") % short(rev)) | |
252 | return |
|
251 | return | |
253 |
|
252 | |||
254 | # print summary |
|
253 | # print summary | |
255 | ui.write(_(b"%s is signed by:\n") % short(rev)) |
|
254 | ui.write(_(b"%s is signed by:\n") % short(rev)) | |
256 | for key in keys: |
|
255 | for key in keys: | |
257 | ui.write(b" %s\n" % keystr(ui, key)) |
|
256 | ui.write(b" %s\n" % keystr(ui, key)) | |
258 |
|
257 | |||
259 |
|
258 | |||
260 | def keystr(ui, key): |
|
259 | def keystr(ui, key): | |
261 | """associate a string to a key (username, comment)""" |
|
260 | """associate a string to a key (username, comment)""" | |
262 | keyid, user, fingerprint = key |
|
261 | keyid, user, fingerprint = key | |
263 | comment = ui.config(b"gpg", fingerprint) |
|
262 | comment = ui.config(b"gpg", fingerprint) | |
264 | if comment: |
|
263 | if comment: | |
265 | return b"%s (%s)" % (user, comment) |
|
264 | return b"%s (%s)" % (user, comment) | |
266 | else: |
|
265 | else: | |
267 | return user |
|
266 | return user | |
268 |
|
267 | |||
269 |
|
268 | |||
270 | @command( |
|
269 | @command( | |
271 | b"sign", |
|
270 | b"sign", | |
272 | [ |
|
271 | [ | |
273 | (b'l', b'local', None, _(b'make the signature local')), |
|
272 | (b'l', b'local', None, _(b'make the signature local')), | |
274 | (b'f', b'force', None, _(b'sign even if the sigfile is modified')), |
|
273 | (b'f', b'force', None, _(b'sign even if the sigfile is modified')), | |
275 | ( |
|
274 | ( | |
276 | b'', |
|
275 | b'', | |
277 | b'no-commit', |
|
276 | b'no-commit', | |
278 | None, |
|
277 | None, | |
279 | _(b'do not commit the sigfile after signing'), |
|
278 | _(b'do not commit the sigfile after signing'), | |
280 | ), |
|
279 | ), | |
281 | (b'k', b'key', b'', _(b'the key id to sign with'), _(b'ID')), |
|
280 | (b'k', b'key', b'', _(b'the key id to sign with'), _(b'ID')), | |
282 | (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')), |
|
281 | (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')), | |
283 | (b'e', b'edit', False, _(b'invoke editor on commit messages')), |
|
282 | (b'e', b'edit', False, _(b'invoke editor on commit messages')), | |
284 | ] |
|
283 | ] | |
285 | + cmdutil.commitopts2, |
|
284 | + cmdutil.commitopts2, | |
286 | _(b'hg sign [OPTION]... [REV]...'), |
|
285 | _(b'hg sign [OPTION]... [REV]...'), | |
287 | helpcategory=_HELP_CATEGORY, |
|
286 | helpcategory=_HELP_CATEGORY, | |
288 | ) |
|
287 | ) | |
289 | def sign(ui, repo, *revs, **opts): |
|
288 | def sign(ui, repo, *revs, **opts): | |
290 | """add a signature for the current or given revision |
|
289 | """add a signature for the current or given revision | |
291 |
|
290 | |||
292 | If no revision is given, the parent of the working directory is used, |
|
291 | If no revision is given, the parent of the working directory is used, | |
293 | or tip if no revision is checked out. |
|
292 | or tip if no revision is checked out. | |
294 |
|
293 | |||
295 | The ``gpg.cmd`` config setting can be used to specify the command |
|
294 | The ``gpg.cmd`` config setting can be used to specify the command | |
296 | to run. A default key can be specified with ``gpg.key``. |
|
295 | to run. A default key can be specified with ``gpg.key``. | |
297 |
|
296 | |||
298 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
297 | See :hg:`help dates` for a list of formats valid for -d/--date. | |
299 | """ |
|
298 | """ | |
300 | with repo.wlock(): |
|
299 | with repo.wlock(): | |
301 | return _dosign(ui, repo, *revs, **opts) |
|
300 | return _dosign(ui, repo, *revs, **opts) | |
302 |
|
301 | |||
303 |
|
302 | |||
304 | def _dosign(ui, repo, *revs, **opts): |
|
303 | def _dosign(ui, repo, *revs, **opts): | |
305 | mygpg = newgpg(ui, **opts) |
|
304 | mygpg = newgpg(ui, **opts) | |
306 | opts = pycompat.byteskwargs(opts) |
|
305 | opts = pycompat.byteskwargs(opts) | |
307 | sigver = b"0" |
|
306 | sigver = b"0" | |
308 | sigmessage = b"" |
|
307 | sigmessage = b"" | |
309 |
|
308 | |||
310 | date = opts.get(b'date') |
|
309 | date = opts.get(b'date') | |
311 | if date: |
|
310 | if date: | |
312 | opts[b'date'] = dateutil.parsedate(date) |
|
311 | opts[b'date'] = dateutil.parsedate(date) | |
313 |
|
312 | |||
314 | if revs: |
|
313 | if revs: | |
315 | nodes = [repo.lookup(n) for n in revs] |
|
314 | nodes = [repo.lookup(n) for n in revs] | |
316 | else: |
|
315 | else: | |
317 | nodes = [node for node in repo.dirstate.parents() if node != nullid] |
|
316 | nodes = [ | |
|
317 | node for node in repo.dirstate.parents() if node != repo.nullid | |||
|
318 | ] | |||
318 | if len(nodes) > 1: |
|
319 | if len(nodes) > 1: | |
319 | raise error.Abort( |
|
320 | raise error.Abort( | |
320 | _(b'uncommitted merge - please provide a specific revision') |
|
321 | _(b'uncommitted merge - please provide a specific revision') | |
321 | ) |
|
322 | ) | |
322 | if not nodes: |
|
323 | if not nodes: | |
323 | nodes = [repo.changelog.tip()] |
|
324 | nodes = [repo.changelog.tip()] | |
324 |
|
325 | |||
325 | for n in nodes: |
|
326 | for n in nodes: | |
326 | hexnode = hex(n) |
|
327 | hexnode = hex(n) | |
327 | ui.write(_(b"signing %d:%s\n") % (repo.changelog.rev(n), short(n))) |
|
328 | ui.write(_(b"signing %d:%s\n") % (repo.changelog.rev(n), short(n))) | |
328 | # build data |
|
329 | # build data | |
329 | data = node2txt(repo, n, sigver) |
|
330 | data = node2txt(repo, n, sigver) | |
330 | sig = mygpg.sign(data) |
|
331 | sig = mygpg.sign(data) | |
331 | if not sig: |
|
332 | if not sig: | |
332 | raise error.Abort(_(b"error while signing")) |
|
333 | raise error.Abort(_(b"error while signing")) | |
333 | sig = binascii.b2a_base64(sig) |
|
334 | sig = binascii.b2a_base64(sig) | |
334 | sig = sig.replace(b"\n", b"") |
|
335 | sig = sig.replace(b"\n", b"") | |
335 | sigmessage += b"%s %s %s\n" % (hexnode, sigver, sig) |
|
336 | sigmessage += b"%s %s %s\n" % (hexnode, sigver, sig) | |
336 |
|
337 | |||
337 | # write it |
|
338 | # write it | |
338 | if opts[b'local']: |
|
339 | if opts[b'local']: | |
339 | repo.vfs.append(b"localsigs", sigmessage) |
|
340 | repo.vfs.append(b"localsigs", sigmessage) | |
340 | return |
|
341 | return | |
341 |
|
342 | |||
342 | if not opts[b"force"]: |
|
343 | if not opts[b"force"]: | |
343 | msigs = match.exact([b'.hgsigs']) |
|
344 | msigs = match.exact([b'.hgsigs']) | |
344 | if any(repo.status(match=msigs, unknown=True, ignored=True)): |
|
345 | if any(repo.status(match=msigs, unknown=True, ignored=True)): | |
345 | raise error.Abort( |
|
346 | raise error.Abort( | |
346 | _(b"working copy of .hgsigs is changed "), |
|
347 | _(b"working copy of .hgsigs is changed "), | |
347 | hint=_(b"please commit .hgsigs manually"), |
|
348 | hint=_(b"please commit .hgsigs manually"), | |
348 | ) |
|
349 | ) | |
349 |
|
350 | |||
350 | sigsfile = repo.wvfs(b".hgsigs", b"ab") |
|
351 | sigsfile = repo.wvfs(b".hgsigs", b"ab") | |
351 | sigsfile.write(sigmessage) |
|
352 | sigsfile.write(sigmessage) | |
352 | sigsfile.close() |
|
353 | sigsfile.close() | |
353 |
|
354 | |||
354 | if b'.hgsigs' not in repo.dirstate: |
|
355 | if b'.hgsigs' not in repo.dirstate: | |
355 | repo[None].add([b".hgsigs"]) |
|
356 | repo[None].add([b".hgsigs"]) | |
356 |
|
357 | |||
357 | if opts[b"no_commit"]: |
|
358 | if opts[b"no_commit"]: | |
358 | return |
|
359 | return | |
359 |
|
360 | |||
360 | message = opts[b'message'] |
|
361 | message = opts[b'message'] | |
361 | if not message: |
|
362 | if not message: | |
362 | # we don't translate commit messages |
|
363 | # we don't translate commit messages | |
363 | message = b"\n".join( |
|
364 | message = b"\n".join( | |
364 | [b"Added signature for changeset %s" % short(n) for n in nodes] |
|
365 | [b"Added signature for changeset %s" % short(n) for n in nodes] | |
365 | ) |
|
366 | ) | |
366 | try: |
|
367 | try: | |
367 | editor = cmdutil.getcommiteditor( |
|
368 | editor = cmdutil.getcommiteditor( | |
368 | editform=b'gpg.sign', **pycompat.strkwargs(opts) |
|
369 | editform=b'gpg.sign', **pycompat.strkwargs(opts) | |
369 | ) |
|
370 | ) | |
370 | repo.commit( |
|
371 | repo.commit( | |
371 | message, opts[b'user'], opts[b'date'], match=msigs, editor=editor |
|
372 | message, opts[b'user'], opts[b'date'], match=msigs, editor=editor | |
372 | ) |
|
373 | ) | |
373 | except ValueError as inst: |
|
374 | except ValueError as inst: | |
374 | raise error.Abort(pycompat.bytestr(inst)) |
|
375 | raise error.Abort(pycompat.bytestr(inst)) | |
375 |
|
376 | |||
376 |
|
377 | |||
377 | def node2txt(repo, node, ver): |
|
378 | def node2txt(repo, node, ver): | |
378 | """map a manifest into some text""" |
|
379 | """map a manifest into some text""" | |
379 | if ver == b"0": |
|
380 | if ver == b"0": | |
380 | return b"%s\n" % hex(node) |
|
381 | return b"%s\n" % hex(node) | |
381 | else: |
|
382 | else: | |
382 | raise error.Abort(_(b"unknown signature version")) |
|
383 | raise error.Abort(_(b"unknown signature version")) | |
383 |
|
384 | |||
384 |
|
385 | |||
385 | def extsetup(ui): |
|
386 | def extsetup(ui): | |
386 | # Add our category before "Repository maintenance". |
|
387 | # Add our category before "Repository maintenance". | |
387 | help.CATEGORY_ORDER.insert( |
|
388 | help.CATEGORY_ORDER.insert( | |
388 | help.CATEGORY_ORDER.index(command.CATEGORY_MAINTENANCE), _HELP_CATEGORY |
|
389 | help.CATEGORY_ORDER.index(command.CATEGORY_MAINTENANCE), _HELP_CATEGORY | |
389 | ) |
|
390 | ) | |
390 | help.CATEGORY_NAMES[_HELP_CATEGORY] = b'GPG signing' |
|
391 | help.CATEGORY_NAMES[_HELP_CATEGORY] = b'GPG signing' |
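The gpg.py hunk above assembles each `.hgsigs` entry as a single line of
the form `<hexnode> <sigver> <base64-signature>`: the detached signature is
base64-encoded and stripped of embedded newlines before being joined with
the node hash and the signature version. A minimal sketch of that round
trip (the helper names are ours, not part of the extension)::

    import binascii

    def format_sig_line(hexnode, sigver, rawsig):
        # base64-encode the raw signature and drop the newline that
        # b2a_base64 appends, as the hunk above does for sigmessage
        sig = binascii.b2a_base64(rawsig).replace(b"\n", b"")
        return b"%s %s %s\n" % (hexnode, sigver, sig)

    def parse_sig_line(line):
        # inverse operation: recover the three space-separated fields
        hexnode, sigver, sig = line.rstrip(b"\n").split(b" ", 2)
        return hexnode, sigver, binascii.a2b_base64(sig)

Keeping each record on one line is what the `sig.replace(b"\n", b"")` call
is for; `b2a_base64` always appends a trailing newline of its own.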
@@ -1,389 +1,388 b'' | |||||
1 | # Minimal support for git commands on an hg repository |
|
1 | # Minimal support for git commands on an hg repository | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | '''browse the repository in a graphical way |
|
8 | '''browse the repository in a graphical way | |
9 |
|
9 | |||
10 | The hgk extension allows browsing the history of a repository in a |
|
10 | The hgk extension allows browsing the history of a repository in a | |
11 | graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not |
|
11 | graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not | |
12 | distributed with Mercurial.) |
|
12 | distributed with Mercurial.) | |
13 |
|
13 | |||
14 | hgk consists of two parts: a Tcl script that does the displaying and |
|
14 | hgk consists of two parts: a Tcl script that does the displaying and | |
15 | querying of information, and an extension to Mercurial named hgk.py, |
|
15 | querying of information, and an extension to Mercurial named hgk.py, | |
16 | which provides hooks for hgk to get information. hgk can be found in |
|
16 | which provides hooks for hgk to get information. hgk can be found in | |
17 | the contrib directory, while the extension is shipped in the hgext |

17 | the contrib directory, while the extension is shipped in the hgext | |
18 | repository and needs to be enabled. |

18 | repository and needs to be enabled. | |
19 |
|
19 | |||
20 | The :hg:`view` command will launch the hgk Tcl script. For this command |
|
20 | The :hg:`view` command will launch the hgk Tcl script. For this command | |
21 | to work, hgk must be in your search path. Alternatively, you can specify |

21 | to work, hgk must be in your search path. Alternatively, you can specify | |
22 | the path to hgk in your configuration file:: |
|
22 | the path to hgk in your configuration file:: | |
23 |
|
23 | |||
24 | [hgk] |
|
24 | [hgk] | |
25 | path = /location/of/hgk |
|
25 | path = /location/of/hgk | |
26 |
|
26 | |||
27 | hgk can make use of the extdiff extension to visualize revisions. |
|
27 | hgk can make use of the extdiff extension to visualize revisions. | |
28 | Assuming you have already configured the extdiff vdiff command, just add:: |

28 | Assuming you have already configured the extdiff vdiff command, just add:: | |
29 |
|
29 | |||
30 | [hgk] |
|
30 | [hgk] | |
31 | vdiff=vdiff |
|
31 | vdiff=vdiff | |
32 |
|
32 | |||
33 | The revisions context menu will now display additional entries to fire |

33 | The revisions context menu will now display additional entries to fire | |
34 | vdiff on hovered and selected revisions. |
|
34 | vdiff on hovered and selected revisions. | |
35 | ''' |
|
35 | ''' | |
36 |
|
36 | |||
37 | from __future__ import absolute_import |
|
37 | from __future__ import absolute_import | |
38 |
|
38 | |||
39 | import os |
|
39 | import os | |
40 |
|
40 | |||
41 | from mercurial.i18n import _ |
|
41 | from mercurial.i18n import _ | |
42 | from mercurial.node import ( |
|
42 | from mercurial.node import ( | |
43 | nullid, |
|
|||
44 | nullrev, |
|
43 | nullrev, | |
45 | short, |
|
44 | short, | |
46 | ) |
|
45 | ) | |
47 | from mercurial import ( |
|
46 | from mercurial import ( | |
48 | commands, |
|
47 | commands, | |
49 | obsolete, |
|
48 | obsolete, | |
50 | patch, |
|
49 | patch, | |
51 | pycompat, |
|
50 | pycompat, | |
52 | registrar, |
|
51 | registrar, | |
53 | scmutil, |
|
52 | scmutil, | |
54 | ) |
|
53 | ) | |
55 |
|
54 | |||
56 | cmdtable = {} |
|
55 | cmdtable = {} | |
57 | command = registrar.command(cmdtable) |
|
56 | command = registrar.command(cmdtable) | |
58 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
57 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
59 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
58 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
60 | # be specifying the version(s) of Mercurial they are tested with, or |
|
59 | # be specifying the version(s) of Mercurial they are tested with, or | |
61 | # leave the attribute unspecified. |
|
60 | # leave the attribute unspecified. | |
62 | testedwith = b'ships-with-hg-core' |
|
61 | testedwith = b'ships-with-hg-core' | |
63 |
|
62 | |||
64 | configtable = {} |
|
63 | configtable = {} | |
65 | configitem = registrar.configitem(configtable) |
|
64 | configitem = registrar.configitem(configtable) | |
66 |
|
65 | |||
67 | configitem( |
|
66 | configitem( | |
68 | b'hgk', |
|
67 | b'hgk', | |
69 | b'path', |
|
68 | b'path', | |
70 | default=b'hgk', |
|
69 | default=b'hgk', | |
71 | ) |
|
70 | ) | |
72 |
|
71 | |||
73 |
|
72 | |||
74 | @command( |
|
73 | @command( | |
75 | b'debug-diff-tree', |
|
74 | b'debug-diff-tree', | |
76 | [ |
|
75 | [ | |
77 | (b'p', b'patch', None, _(b'generate patch')), |
|
76 | (b'p', b'patch', None, _(b'generate patch')), | |
78 | (b'r', b'recursive', None, _(b'recursive')), |
|
77 | (b'r', b'recursive', None, _(b'recursive')), | |
79 | (b'P', b'pretty', None, _(b'pretty')), |
|
78 | (b'P', b'pretty', None, _(b'pretty')), | |
80 | (b's', b'stdin', None, _(b'stdin')), |
|
79 | (b's', b'stdin', None, _(b'stdin')), | |
81 | (b'C', b'copy', None, _(b'detect copies')), |
|
80 | (b'C', b'copy', None, _(b'detect copies')), | |
82 | (b'S', b'search', b"", _(b'search')), |
|
81 | (b'S', b'search', b"", _(b'search')), | |
83 | ], |
|
82 | ], | |
84 | b'[OPTION]... NODE1 NODE2 [FILE]...', |
|
83 | b'[OPTION]... NODE1 NODE2 [FILE]...', | |
85 | inferrepo=True, |
|
84 | inferrepo=True, | |
86 | ) |
|
85 | ) | |
87 | def difftree(ui, repo, node1=None, node2=None, *files, **opts): |
|
86 | def difftree(ui, repo, node1=None, node2=None, *files, **opts): | |
88 | """diff trees from two commits""" |
|
87 | """diff trees from two commits""" | |
89 |
|
88 | |||
90 | def __difftree(repo, node1, node2, files=None): |
|
89 | def __difftree(repo, node1, node2, files=None): | |
91 | assert node2 is not None |
|
90 | assert node2 is not None | |
92 | if files is None: |
|
91 | if files is None: | |
93 | files = [] |
|
92 | files = [] | |
94 | mmap = repo[node1].manifest() |
|
93 | mmap = repo[node1].manifest() | |
95 | mmap2 = repo[node2].manifest() |
|
94 | mmap2 = repo[node2].manifest() | |
96 | m = scmutil.match(repo[node1], files) |
|
95 | m = scmutil.match(repo[node1], files) | |
97 | st = repo.status(node1, node2, m) |
|
96 | st = repo.status(node1, node2, m) | |
98 | empty = short(nullid) |
|
97 | empty = short(repo.nullid) | |
99 |
|
98 | |||
100 | for f in st.modified: |
|
99 | for f in st.modified: | |
101 | # TODO get file permissions |
|
100 | # TODO get file permissions | |
102 | ui.writenoi18n( |
|
101 | ui.writenoi18n( | |
103 | b":100664 100664 %s %s M\t%s\t%s\n" |
|
102 | b":100664 100664 %s %s M\t%s\t%s\n" | |
104 | % (short(mmap[f]), short(mmap2[f]), f, f) |
|
103 | % (short(mmap[f]), short(mmap2[f]), f, f) | |
105 | ) |
|
104 | ) | |
106 | for f in st.added: |
|
105 | for f in st.added: | |
107 | ui.writenoi18n( |
|
106 | ui.writenoi18n( | |
108 | b":000000 100664 %s %s N\t%s\t%s\n" |
|
107 | b":000000 100664 %s %s N\t%s\t%s\n" | |
109 | % (empty, short(mmap2[f]), f, f) |
|
108 | % (empty, short(mmap2[f]), f, f) | |
110 | ) |
|
109 | ) | |
111 | for f in st.removed: |
|
110 | for f in st.removed: | |
112 | ui.writenoi18n( |
|
111 | ui.writenoi18n( | |
113 | b":100664 000000 %s %s D\t%s\t%s\n" |
|
112 | b":100664 000000 %s %s D\t%s\t%s\n" | |
114 | % (short(mmap[f]), empty, f, f) |
|
113 | % (short(mmap[f]), empty, f, f) | |
115 | ) |
|
114 | ) | |
116 |
|
115 | |||
117 | ## |
|
116 | ## | |
118 |
|
117 | |||
119 | while True: |
|
118 | while True: | |
120 | if opts['stdin']: |
|
119 | if opts['stdin']: | |
121 | line = ui.fin.readline() |
|
120 | line = ui.fin.readline() | |
122 | if not line: |
|
121 | if not line: | |
123 | break |
|
122 | break | |
124 | line = line.rstrip(pycompat.oslinesep).split(b' ') |
|
123 | line = line.rstrip(pycompat.oslinesep).split(b' ') | |
125 | node1 = line[0] |
|
124 | node1 = line[0] | |
126 | if len(line) > 1: |
|
125 | if len(line) > 1: | |
127 | node2 = line[1] |
|
126 | node2 = line[1] | |
128 | else: |
|
127 | else: | |
129 | node2 = None |
|
128 | node2 = None | |
130 | node1 = repo.lookup(node1) |
|
129 | node1 = repo.lookup(node1) | |
131 | if node2: |
|
130 | if node2: | |
132 | node2 = repo.lookup(node2) |
|
131 | node2 = repo.lookup(node2) | |
133 | else: |
|
132 | else: | |
134 | node2 = node1 |
|
133 | node2 = node1 | |
135 | node1 = repo.changelog.parents(node1)[0] |
|
134 | node1 = repo.changelog.parents(node1)[0] | |
136 | if opts['patch']: |
|
135 | if opts['patch']: | |
137 | if opts['pretty']: |
|
136 | if opts['pretty']: | |
138 | catcommit(ui, repo, node2, b"") |
|
137 | catcommit(ui, repo, node2, b"") | |
139 | m = scmutil.match(repo[node1], files) |
|
138 | m = scmutil.match(repo[node1], files) | |
140 | diffopts = patch.difffeatureopts(ui) |
|
139 | diffopts = patch.difffeatureopts(ui) | |
141 | diffopts.git = True |
|
140 | diffopts.git = True | |
142 | chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts) |
|
141 | chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts) | |
143 | for chunk in chunks: |
|
142 | for chunk in chunks: | |
144 | ui.write(chunk) |
|
143 | ui.write(chunk) | |
145 | else: |
|
144 | else: | |
146 | __difftree(repo, node1, node2, files=files) |
|
145 | __difftree(repo, node1, node2, files=files) | |
147 | if not opts['stdin']: |
|
146 | if not opts['stdin']: | |
148 | break |
|
147 | break | |
149 |
|
148 | |||
150 |
|
149 | |||
151 | def catcommit(ui, repo, n, prefix, ctx=None): |
|
150 | def catcommit(ui, repo, n, prefix, ctx=None): | |
152 | nlprefix = b'\n' + prefix |
|
151 | nlprefix = b'\n' + prefix | |
153 | if ctx is None: |
|
152 | if ctx is None: | |
154 | ctx = repo[n] |
|
153 | ctx = repo[n] | |
155 | # use ctx.node() instead ?? |
|
154 | # use ctx.node() instead ?? | |
156 | ui.write((b"tree %s\n" % short(ctx.changeset()[0]))) |
|
155 | ui.write((b"tree %s\n" % short(ctx.changeset()[0]))) | |
157 | for p in ctx.parents(): |
|
156 | for p in ctx.parents(): | |
158 | ui.write((b"parent %s\n" % p)) |
|
157 | ui.write((b"parent %s\n" % p)) | |
159 |
|
158 | |||
160 | date = ctx.date() |
|
159 | date = ctx.date() | |
161 | description = ctx.description().replace(b"\0", b"") |
|
160 | description = ctx.description().replace(b"\0", b"") | |
162 | ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1]))) |
|
161 | ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1]))) | |
163 |
|
162 | |||
164 | if b'committer' in ctx.extra(): |
|
163 | if b'committer' in ctx.extra(): | |
165 | ui.write((b"committer %s\n" % ctx.extra()[b'committer'])) |
|
164 | ui.write((b"committer %s\n" % ctx.extra()[b'committer'])) | |
166 |
|
165 | |||
167 | ui.write((b"revision %d\n" % ctx.rev())) |
|
166 | ui.write((b"revision %d\n" % ctx.rev())) | |
168 | ui.write((b"branch %s\n" % ctx.branch())) |
|
167 | ui.write((b"branch %s\n" % ctx.branch())) | |
169 | if obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
168 | if obsolete.isenabled(repo, obsolete.createmarkersopt): | |
170 | if ctx.obsolete(): |
|
169 | if ctx.obsolete(): | |
171 | ui.writenoi18n(b"obsolete\n") |
|
170 | ui.writenoi18n(b"obsolete\n") | |
172 | ui.write((b"phase %s\n\n" % ctx.phasestr())) |
|
171 | ui.write((b"phase %s\n\n" % ctx.phasestr())) | |
173 |
|
172 | |||
174 | if prefix != b"": |
|
173 | if prefix != b"": | |
175 | ui.write( |
|
174 | ui.write( | |
176 | b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip()) |
|
175 | b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip()) | |
177 | ) |
|
176 | ) | |
178 | else: |
|
177 | else: | |
179 | ui.write(description + b"\n") |
|
178 | ui.write(description + b"\n") | |
180 | if prefix: |
|
179 | if prefix: | |
181 | ui.write(b'\0') |
|
180 | ui.write(b'\0') | |
182 |
|
181 | |||
183 |
|
182 | |||
184 | @command(b'debug-merge-base', [], _(b'REV REV')) |
|
183 | @command(b'debug-merge-base', [], _(b'REV REV')) | |
185 | def base(ui, repo, node1, node2): |
|
184 | def base(ui, repo, node1, node2): | |
186 | """output common ancestor information""" |
|
185 | """output common ancestor information""" | |
187 | node1 = repo.lookup(node1) |
|
186 | node1 = repo.lookup(node1) | |
188 | node2 = repo.lookup(node2) |
|
187 | node2 = repo.lookup(node2) | |
189 | n = repo.changelog.ancestor(node1, node2) |
|
188 | n = repo.changelog.ancestor(node1, node2) | |
190 | ui.write(short(n) + b"\n") |
|
189 | ui.write(short(n) + b"\n") | |
191 |
|
190 | |||
192 |
|
191 | |||
193 | @command( |
|
192 | @command( | |
194 | b'debug-cat-file', |
|
193 | b'debug-cat-file', | |
195 | [(b's', b'stdin', None, _(b'stdin'))], |
|
194 | [(b's', b'stdin', None, _(b'stdin'))], | |
196 | _(b'[OPTION]... TYPE FILE'), |
|
195 | _(b'[OPTION]... TYPE FILE'), | |
197 | inferrepo=True, |
|
196 | inferrepo=True, | |
198 | ) |
|
197 | ) | |
199 | def catfile(ui, repo, type=None, r=None, **opts): |
|
198 | def catfile(ui, repo, type=None, r=None, **opts): | |
200 | """cat a specific revision""" |
|
199 | """cat a specific revision""" | |
201 | # in stdin mode, every line except the commit is prefixed with two |
|
200 | # in stdin mode, every line except the commit is prefixed with two | |
202 | # spaces. This way our caller can find the commit without magic |

201 | # spaces. This way our caller can find the commit without magic | |
203 | # strings |
|
202 | # strings | |
204 | # |
|
203 | # | |
205 | prefix = b"" |
|
204 | prefix = b"" | |
206 | if opts['stdin']: |
|
205 | if opts['stdin']: | |
207 | line = ui.fin.readline() |
|
206 | line = ui.fin.readline() | |
208 | if not line: |
|
207 | if not line: | |
209 | return |
|
208 | return | |
210 | (type, r) = line.rstrip(pycompat.oslinesep).split(b' ') |
|
209 | (type, r) = line.rstrip(pycompat.oslinesep).split(b' ') | |
211 | prefix = b" " |
|
210 | prefix = b" " | |
212 | else: |
|
211 | else: | |
213 | if not type or not r: |
|
212 | if not type or not r: | |
214 | ui.warn(_(b"cat-file: type or revision not supplied\n")) |
|
213 | ui.warn(_(b"cat-file: type or revision not supplied\n")) | |
215 | commands.help_(ui, b'cat-file') |
|
214 | commands.help_(ui, b'cat-file') | |
216 |
|
215 | |||
217 | while r: |
|
216 | while r: | |
218 | if type != b"commit": |
|
217 | if type != b"commit": | |
219 | ui.warn(_(b"aborting hg cat-file only understands commits\n")) |
|
218 | ui.warn(_(b"aborting hg cat-file only understands commits\n")) | |
220 | return 1 |
|
219 | return 1 | |
221 | n = repo.lookup(r) |
|
220 | n = repo.lookup(r) | |
222 | catcommit(ui, repo, n, prefix) |
|
221 | catcommit(ui, repo, n, prefix) | |
223 | if opts['stdin']: |
|
222 | if opts['stdin']: | |
224 | line = ui.fin.readline() |
|
223 | line = ui.fin.readline() | |
225 | if not line: |
|
224 | if not line: | |
226 | break |
|
225 | break | |
227 | (type, r) = line.rstrip(pycompat.oslinesep).split(b' ') |
|
226 | (type, r) = line.rstrip(pycompat.oslinesep).split(b' ') | |
228 | else: |
|
227 | else: | |
229 | break |
|
228 | break | |
230 |
|
229 | |||
231 |
|
230 | |||
232 | # git rev-tree is a confusing thing. You can supply a number of |
|
231 | # git rev-tree is a confusing thing. You can supply a number of | |
233 | # commit sha1s on the command line, and it walks the commit history |
|
232 | # commit sha1s on the command line, and it walks the commit history | |
234 | # telling you which commits are reachable from the supplied ones via |
|
233 | # telling you which commits are reachable from the supplied ones via | |
235 | # a bitmask based on arg position. |
|
234 | # a bitmask based on arg position. | |
236 | # You can specify a commit to stop at by starting the sha1 with ^ |

235 | # You can specify a commit to stop at by starting the sha1 with ^ | |
237 | def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False): |
|
236 | def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False): | |
238 | def chlogwalk(): |
|
237 | def chlogwalk(): | |
239 | count = len(repo) |
|
238 | count = len(repo) | |
240 | i = count |
|
239 | i = count | |
241 | l = [0] * 100 |
|
240 | l = [0] * 100 | |
242 | chunk = 100 |
|
241 | chunk = 100 | |
243 | while True: |
|
242 | while True: | |
244 | if chunk > i: |
|
243 | if chunk > i: | |
245 | chunk = i |
|
244 | chunk = i | |
246 | i = 0 |
|
245 | i = 0 | |
247 | else: |
|
246 | else: | |
248 | i -= chunk |
|
247 | i -= chunk | |
249 |
|
248 | |||
250 | for x in pycompat.xrange(chunk): |
|
249 | for x in pycompat.xrange(chunk): | |
251 | if i + x >= count: |
|
250 | if i + x >= count: | |
252 | l[chunk - x :] = [0] * (chunk - x) |
|
251 | l[chunk - x :] = [0] * (chunk - x) | |
253 | break |
|
252 | break | |
254 | if full is not None: |
|
253 | if full is not None: | |
255 | if (i + x) in repo: |
|
254 | if (i + x) in repo: | |
256 | l[x] = repo[i + x] |
|
255 | l[x] = repo[i + x] | |
257 | l[x].changeset() # force reading |
|
256 | l[x].changeset() # force reading | |
258 | else: |
|
257 | else: | |
259 | if (i + x) in repo: |
|
258 | if (i + x) in repo: | |
260 | l[x] = 1 |
|
259 | l[x] = 1 | |
261 | for x in pycompat.xrange(chunk - 1, -1, -1): |
|
260 | for x in pycompat.xrange(chunk - 1, -1, -1): | |
262 | if l[x] != 0: |
|
261 | if l[x] != 0: | |
263 | yield (i + x, full is not None and l[x] or None) |
|
262 | yield (i + x, full is not None and l[x] or None) | |
264 | if i == 0: |
|
263 | if i == 0: | |
265 | break |
|
264 | break | |
266 |
|
265 | |||
267 | # calculate and return the reachability bitmask for sha |
|
266 | # calculate and return the reachability bitmask for sha | |
268 | def is_reachable(ar, reachable, sha): |
|
267 | def is_reachable(ar, reachable, sha): | |
269 | if len(ar) == 0: |
|
268 | if len(ar) == 0: | |
270 | return 1 |
|
269 | return 1 | |
271 | mask = 0 |
|
270 | mask = 0 | |
272 | for i in pycompat.xrange(len(ar)): |
|
271 | for i in pycompat.xrange(len(ar)): | |
273 | if sha in reachable[i]: |
|
272 | if sha in reachable[i]: | |
274 | mask |= 1 << i |
|
273 | mask |= 1 << i | |
275 |
|
274 | |||
276 | return mask |
|
275 | return mask | |
277 |
|
276 | |||
278 | reachable = [] |
|
277 | reachable = [] | |
279 | stop_sha1 = [] |
|
278 | stop_sha1 = [] | |
280 | want_sha1 = [] |
|
279 | want_sha1 = [] | |
281 | count = 0 |
|
280 | count = 0 | |
282 |
|
281 | |||
283 | # figure out which commits they are asking for and which ones they |
|
282 | # figure out which commits they are asking for and which ones they | |
284 | # want us to stop on |
|
283 | # want us to stop on | |
285 | for i, arg in enumerate(args): |
|
284 | for i, arg in enumerate(args): | |
286 | if arg.startswith(b'^'): |
|
285 | if arg.startswith(b'^'): | |
287 | s = repo.lookup(arg[1:]) |
|
286 | s = repo.lookup(arg[1:]) | |
288 | stop_sha1.append(s) |
|
287 | stop_sha1.append(s) | |
289 | want_sha1.append(s) |
|
288 | want_sha1.append(s) | |
290 | elif arg != b'HEAD': |
|
289 | elif arg != b'HEAD': | |
291 | want_sha1.append(repo.lookup(arg)) |
|
290 | want_sha1.append(repo.lookup(arg)) | |
292 |
|
291 | |||
293 | # calculate the graph for the supplied commits |
|
292 | # calculate the graph for the supplied commits | |
294 | for i, n in enumerate(want_sha1): |
|
293 | for i, n in enumerate(want_sha1): | |
295 | reachable.append(set()) |
|
294 | reachable.append(set()) | |
296 | visit = [n] |
|
295 | visit = [n] | |
297 | reachable[i].add(n) |
|
296 | reachable[i].add(n) | |
298 | while visit: |
|
297 | while visit: | |
299 | n = visit.pop(0) |
|
298 | n = visit.pop(0) | |
300 | if n in stop_sha1: |
|
299 | if n in stop_sha1: | |
301 | continue |
|
300 | continue | |
302 | for p in repo.changelog.parents(n): |
|
301 | for p in repo.changelog.parents(n): | |
303 | if p not in reachable[i]: |
|
302 | if p not in reachable[i]: | |
304 | reachable[i].add(p) |
|
303 | reachable[i].add(p) | |
305 | visit.append(p) |
|
304 | visit.append(p) | |
306 | if p in stop_sha1: |
|
305 | if p in stop_sha1: | |
307 | continue |
|
306 | continue | |
308 |
|
307 | |||
309 | # walk the repository looking for commits that are in our |
|
308 | # walk the repository looking for commits that are in our | |
310 | # reachability graph |
|
309 | # reachability graph | |
311 | for i, ctx in chlogwalk(): |
|
310 | for i, ctx in chlogwalk(): | |
312 | if i not in repo: |
|
311 | if i not in repo: | |
313 | continue |
|
312 | continue | |
314 | n = repo.changelog.node(i) |
|
313 | n = repo.changelog.node(i) | |
315 | mask = is_reachable(want_sha1, reachable, n) |
|
314 | mask = is_reachable(want_sha1, reachable, n) | |
316 | if mask: |
|
315 | if mask: | |
317 | parentstr = b"" |
|
316 | parentstr = b"" | |
318 | if parents: |
|
317 | if parents: | |
319 | pp = repo.changelog.parents(n) |
|
318 | pp = repo.changelog.parents(n) | |
320 | if pp[0] != nullid: |
|
319 | if pp[0] != repo.nullid: | |
321 | parentstr += b" " + short(pp[0]) |
|
320 | parentstr += b" " + short(pp[0]) | |
322 | if pp[1] != nullid: |
|
321 | if pp[1] != repo.nullid: | |
323 | parentstr += b" " + short(pp[1]) |
|
322 | parentstr += b" " + short(pp[1]) | |
324 | if not full: |
|
323 | if not full: | |
325 | ui.write(b"%s%s\n" % (short(n), parentstr)) |
|
324 | ui.write(b"%s%s\n" % (short(n), parentstr)) | |
326 | elif full == b"commit": |
|
325 | elif full == b"commit": | |
327 | ui.write(b"%s%s\n" % (short(n), parentstr)) |
|
326 | ui.write(b"%s%s\n" % (short(n), parentstr)) | |
328 | catcommit(ui, repo, n, b' ', ctx) |
|
327 | catcommit(ui, repo, n, b' ', ctx) | |
329 | else: |
|
328 | else: | |
330 | (p1, p2) = repo.changelog.parents(n) |
|
329 | (p1, p2) = repo.changelog.parents(n) | |
331 | (h, h1, h2) = map(short, (n, p1, p2)) |
|
330 | (h, h1, h2) = map(short, (n, p1, p2)) | |
332 | (i1, i2) = map(repo.changelog.rev, (p1, p2)) |
|
331 | (i1, i2) = map(repo.changelog.rev, (p1, p2)) | |
333 |
|
332 | |||
334 | date = ctx.date()[0] |
|
333 | date = ctx.date()[0] | |
335 | ui.write(b"%s %s:%s" % (date, h, mask)) |
|
334 | ui.write(b"%s %s:%s" % (date, h, mask)) | |
336 | mask = is_reachable(want_sha1, reachable, p1) |
|
335 | mask = is_reachable(want_sha1, reachable, p1) | |
337 | if i1 != nullrev and mask > 0: |
|
336 | if i1 != nullrev and mask > 0: | |
338 | ui.write(b"%s:%s " % (h1, mask)), |
|
337 | ui.write(b"%s:%s " % (h1, mask)), | |
339 | mask = is_reachable(want_sha1, reachable, p2) |
|
338 | mask = is_reachable(want_sha1, reachable, p2) | |
340 | if i2 != nullrev and mask > 0: |
|
339 | if i2 != nullrev and mask > 0: | |
341 | ui.write(b"%s:%s " % (h2, mask)) |
|
340 | ui.write(b"%s:%s " % (h2, mask)) | |
342 | ui.write(b"\n") |
|
341 | ui.write(b"\n") | |
343 | if maxnr and count >= maxnr: |
|
342 | if maxnr and count >= maxnr: | |
344 | break |
|
343 | break | |
345 | count += 1 |
|
344 | count += 1 | |
346 |
|
345 | |||
347 |
|
346 | |||
348 | # git rev-list tries to order things by date, and has the ability to stop |
|
347 | # git rev-list tries to order things by date, and has the ability to stop | |
349 | # at a given commit without walking the whole repo. TODO add the stop |
|
348 | # at a given commit without walking the whole repo. TODO add the stop | |
350 | # parameter |
|
349 | # parameter | |
351 | @command( |
|
350 | @command( | |
352 | b'debug-rev-list', |
|
351 | b'debug-rev-list', | |
353 | [ |
|
352 | [ | |
354 | (b'H', b'header', None, _(b'header')), |
|
353 | (b'H', b'header', None, _(b'header')), | |
355 | (b't', b'topo-order', None, _(b'topo-order')), |
|
354 | (b't', b'topo-order', None, _(b'topo-order')), | |
356 | (b'p', b'parents', None, _(b'parents')), |
|
355 | (b'p', b'parents', None, _(b'parents')), | |
357 | (b'n', b'max-count', 0, _(b'max-count')), |
|
356 | (b'n', b'max-count', 0, _(b'max-count')), | |
358 | ], |
|
357 | ], | |
359 | b'[OPTION]... REV...', |
|
358 | b'[OPTION]... REV...', | |
360 | ) |
|
359 | ) | |
361 | def revlist(ui, repo, *revs, **opts): |
|
360 | def revlist(ui, repo, *revs, **opts): | |
362 | """print revisions""" |
|
361 | """print revisions""" | |
363 | if opts['header']: |
|
362 | if opts['header']: | |
364 | full = b"commit" |
|
363 | full = b"commit" | |
365 | else: |
|
364 | else: | |
366 | full = None |
|
365 | full = None | |
367 | copy = [x for x in revs] |
|
366 | copy = [x for x in revs] | |
368 | revtree(ui, copy, repo, full, opts['max_count'], opts[r'parents']) |
|
367 | revtree(ui, copy, repo, full, opts['max_count'], opts[r'parents']) | |
369 |
|
368 | |||
370 |
|
369 | |||
371 | @command( |
|
370 | @command( | |
372 | b'view', |
|
371 | b'view', | |
373 | [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))], |
|
372 | [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))], | |
374 | _(b'[-l LIMIT] [REVRANGE]'), |
|
373 | _(b'[-l LIMIT] [REVRANGE]'), | |
375 | helpcategory=command.CATEGORY_CHANGE_NAVIGATION, |
|
374 | helpcategory=command.CATEGORY_CHANGE_NAVIGATION, | |
376 | ) |
|
375 | ) | |
377 | def view(ui, repo, *etc, **opts): |
|
376 | def view(ui, repo, *etc, **opts): | |
378 | """start interactive history viewer""" |
|
377 | """start interactive history viewer""" | |
379 | opts = pycompat.byteskwargs(opts) |
|
378 | opts = pycompat.byteskwargs(opts) | |
380 | os.chdir(repo.root) |
|
379 | os.chdir(repo.root) | |
381 | optstr = b' '.join( |
|
380 | optstr = b' '.join( | |
382 | [b'--%s %s' % (k, v) for k, v in pycompat.iteritems(opts) if v] |
|
381 | [b'--%s %s' % (k, v) for k, v in pycompat.iteritems(opts) if v] | |
383 | ) |
|
382 | ) | |
384 | if repo.filtername is None: |
|
383 | if repo.filtername is None: | |
385 | optstr += b'--hidden' |
|
384 | optstr += b'--hidden' | |
386 |
|
385 | |||
387 | cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc)) |
|
386 | cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc)) | |
388 | ui.debug(b"running %s\n" % cmd) |
|
387 | ui.debug(b"running %s\n" % cmd) | |
389 | ui.system(cmd, blockedtag=b'hgk_view') |
|
388 | ui.system(cmd, blockedtag=b'hgk_view') |
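The `revtree()`/`is_reachable()` pair in the hgk.py hunk above encodes
reachability as a bitmask: bit `i` of a commit's mask is set when the
commit is reachable from the i-th node supplied on the command line, and
an empty argument list matches every commit. A standalone sketch of the
mask computation (the function and argument names are ours)::

    def reachability_mask(node, reachable_sets):
        # reachable_sets[i] holds every node reachable from the i-th
        # command-line argument, as computed by the BFS in revtree()
        if not reachable_sets:
            return 1  # no arguments: everything matches
        mask = 0
        for i, reachable in enumerate(reachable_sets):
            if node in reachable:
                mask |= 1 << i
        return mask

A commit printed with mask 3, for example, is reachable from both the
first and the second argument, which is how `debug-rev-list` output can
distinguish overlapping histories.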
@@ -1,608 +1,607 b'' | |||||
1 | # journal.py |
|
1 | # journal.py | |
2 | # |
|
2 | # | |
3 | # Copyright 2014-2016 Facebook, Inc. |
|
3 | # Copyright 2014-2016 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | """track previous positions of bookmarks (EXPERIMENTAL) |
|
7 | """track previous positions of bookmarks (EXPERIMENTAL) | |
8 |
|
8 | |||
9 | This extension adds a new command: `hg journal`, which shows you where |
|
9 | This extension adds a new command: `hg journal`, which shows you where | |
10 | bookmarks were previously located. |
|
10 | bookmarks were previously located. | |
11 |
|
11 | |||
12 | """ |
|
12 | """ | |
13 |
|
13 | |||
14 | from __future__ import absolute_import |
|
14 | from __future__ import absolute_import | |
15 |
|
15 | |||
16 | import collections |
|
16 | import collections | |
17 | import errno |
|
17 | import errno | |
18 | import os |
|
18 | import os | |
19 | import weakref |
|
19 | import weakref | |
20 |
|
20 | |||
21 | from mercurial.i18n import _ |
|
21 | from mercurial.i18n import _ | |
22 | from mercurial.node import ( |
|
22 | from mercurial.node import ( | |
23 | bin, |
|
23 | bin, | |
24 | hex, |
|
24 | hex, | |
25 | nullid, |
|
|||
26 | ) |
|
25 | ) | |
27 |
|
26 | |||
28 | from mercurial import ( |
|
27 | from mercurial import ( | |
29 | bookmarks, |
|
28 | bookmarks, | |
30 | cmdutil, |
|
29 | cmdutil, | |
31 | dispatch, |
|
30 | dispatch, | |
32 | encoding, |
|
31 | encoding, | |
33 | error, |
|
32 | error, | |
34 | extensions, |
|
33 | extensions, | |
35 | hg, |
|
34 | hg, | |
36 | localrepo, |
|
35 | localrepo, | |
37 | lock, |
|
36 | lock, | |
38 | logcmdutil, |
|
37 | logcmdutil, | |
39 | pycompat, |
|
38 | pycompat, | |
40 | registrar, |
|
39 | registrar, | |
41 | util, |
|
40 | util, | |
42 | ) |
|
41 | ) | |
43 | from mercurial.utils import ( |
|
42 | from mercurial.utils import ( | |
44 | dateutil, |
|
43 | dateutil, | |
45 | procutil, |
|
44 | procutil, | |
46 | stringutil, |
|
45 | stringutil, | |
47 | ) |
|
46 | ) | |
48 |
|
47 | |||
49 | cmdtable = {} |
|
48 | cmdtable = {} | |
50 | command = registrar.command(cmdtable) |
|
49 | command = registrar.command(cmdtable) | |
51 |
|
50 | |||
52 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
51 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
53 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
52 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
54 | # be specifying the version(s) of Mercurial they are tested with, or |
|
53 | # be specifying the version(s) of Mercurial they are tested with, or | |
55 | # leave the attribute unspecified. |
|
54 | # leave the attribute unspecified. | |
56 | testedwith = b'ships-with-hg-core' |
|
55 | testedwith = b'ships-with-hg-core' | |
57 |
|
56 | |||
58 | # storage format version; increment when the format changes |
|
57 | # storage format version; increment when the format changes | |
59 | storageversion = 0 |
|
58 | storageversion = 0 | |
60 |
|
59 | |||
61 | # namespaces |
|
60 | # namespaces | |
62 | bookmarktype = b'bookmark' |
|
61 | bookmarktype = b'bookmark' | |
63 | wdirparenttype = b'wdirparent' |
|
62 | wdirparenttype = b'wdirparent' | |
64 | # In a shared repository, what shared feature name is used |
|
63 | # In a shared repository, what shared feature name is used | |
65 | # to indicate this namespace is shared with the source? |
|
64 | # to indicate this namespace is shared with the source? | |
66 | sharednamespaces = { |
|
65 | sharednamespaces = { | |
67 | bookmarktype: hg.sharedbookmarks, |
|
66 | bookmarktype: hg.sharedbookmarks, | |
68 | } |
|
67 | } | |
69 |
|
68 | |||
70 | # Journal recording, register hooks and storage object |
|
69 | # Journal recording, register hooks and storage object | |
71 | def extsetup(ui): |
|
70 | def extsetup(ui): | |
72 | extensions.wrapfunction(dispatch, b'runcommand', runcommand) |
|
71 | extensions.wrapfunction(dispatch, b'runcommand', runcommand) | |
73 | extensions.wrapfunction(bookmarks.bmstore, b'_write', recordbookmarks) |
|
72 | extensions.wrapfunction(bookmarks.bmstore, b'_write', recordbookmarks) | |
74 | extensions.wrapfilecache( |
|
73 | extensions.wrapfilecache( | |
75 | localrepo.localrepository, b'dirstate', wrapdirstate |
|
74 | localrepo.localrepository, b'dirstate', wrapdirstate | |
76 | ) |
|
75 | ) | |
77 | extensions.wrapfunction(hg, b'postshare', wrappostshare) |
|
76 | extensions.wrapfunction(hg, b'postshare', wrappostshare) | |
78 | extensions.wrapfunction(hg, b'copystore', unsharejournal) |
|
77 | extensions.wrapfunction(hg, b'copystore', unsharejournal) | |
79 |
|
78 | |||
80 |
|
79 | |||
81 | def reposetup(ui, repo): |
|
80 | def reposetup(ui, repo): | |
82 | if repo.local(): |
|
81 | if repo.local(): | |
83 | repo.journal = journalstorage(repo) |
|
82 | repo.journal = journalstorage(repo) | |
84 | repo._wlockfreeprefix.add(b'namejournal') |
|
83 | repo._wlockfreeprefix.add(b'namejournal') | |
85 |
|
84 | |||
86 | dirstate, cached = localrepo.isfilecached(repo, b'dirstate') |
|
85 | dirstate, cached = localrepo.isfilecached(repo, b'dirstate') | |
87 | if cached: |
|
86 | if cached: | |
88 | # an already instantiated dirstate isn't yet marked as |

87 | # an already instantiated dirstate isn't yet marked as | |
89 | # "journal"-ing, even though repo.dirstate() was already |

88 | # "journal"-ing, even though repo.dirstate() was already | |
90 | # wrapped by our own wrapdirstate() |

89 | # wrapped by our own wrapdirstate() | |
91 | _setupdirstate(repo, dirstate) |
|
90 | _setupdirstate(repo, dirstate) | |
92 |
|
91 | |||
93 |
|
92 | |||
94 | def runcommand(orig, lui, repo, cmd, fullargs, *args): |
|
93 | def runcommand(orig, lui, repo, cmd, fullargs, *args): | |
95 | """Track the command line options for recording in the journal""" |
|
94 | """Track the command line options for recording in the journal""" | |
96 | journalstorage.recordcommand(*fullargs) |
|
95 | journalstorage.recordcommand(*fullargs) | |
97 | return orig(lui, repo, cmd, fullargs, *args) |
|
96 | return orig(lui, repo, cmd, fullargs, *args) | |
98 |
|
97 | |||
99 |
|
98 | |||
100 | def _setupdirstate(repo, dirstate): |
|
99 | def _setupdirstate(repo, dirstate): | |
101 | dirstate.journalstorage = repo.journal |
|
100 | dirstate.journalstorage = repo.journal | |
102 | dirstate.addparentchangecallback(b'journal', recorddirstateparents) |
|
101 | dirstate.addparentchangecallback(b'journal', recorddirstateparents) | |
103 |
|
102 | |||
104 |
|
103 | |||
105 | # hooks to record dirstate changes |
|
104 | # hooks to record dirstate changes | |
106 | def wrapdirstate(orig, repo): |
|
105 | def wrapdirstate(orig, repo): | |
107 | """Make journal storage available to the dirstate object""" |
|
106 | """Make journal storage available to the dirstate object""" | |
108 | dirstate = orig(repo) |
|
107 | dirstate = orig(repo) | |
109 | if util.safehasattr(repo, 'journal'): |
|
108 | if util.safehasattr(repo, 'journal'): | |
110 | _setupdirstate(repo, dirstate) |
|
109 | _setupdirstate(repo, dirstate) | |
111 | return dirstate |
|
110 | return dirstate | |
112 |
|
111 | |||
113 |
|
112 | |||
114 | def recorddirstateparents(dirstate, old, new): |
|
113 | def recorddirstateparents(dirstate, old, new): | |
115 | """Records all dirstate parent changes in the journal.""" |
|
114 | """Records all dirstate parent changes in the journal.""" | |
116 | old = list(old) |
|
115 | old = list(old) | |
117 | new = list(new) |
|
116 | new = list(new) | |
118 | if util.safehasattr(dirstate, 'journalstorage'): |
|
117 | if util.safehasattr(dirstate, 'journalstorage'): | |
119 | # only record two hashes if there was a merge |
|
118 | # only record two hashes if there was a merge | |
120 | oldhashes = old[:1] if old[1] == nullid else old |
|
119 | oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old | |
121 | newhashes = new[:1] if new[1] == nullid else new |
|
120 | newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new | |
122 | dirstate.journalstorage.record( |
|
121 | dirstate.journalstorage.record( | |
123 | wdirparenttype, b'.', oldhashes, newhashes |
|
122 | wdirparenttype, b'.', oldhashes, newhashes | |
124 | ) |
|
123 | ) | |
125 |
|
124 | |||
126 |
|
125 | |||
127 | # hooks to record bookmark changes (both local and remote) |
|
126 | # hooks to record bookmark changes (both local and remote) | |
128 | def recordbookmarks(orig, store, fp): |
|
127 | def recordbookmarks(orig, store, fp): | |
129 | """Records all bookmark changes in the journal.""" |
|
128 | """Records all bookmark changes in the journal.""" | |
130 | repo = store._repo |
|
129 | repo = store._repo | |
131 | if util.safehasattr(repo, 'journal'): |
|
130 | if util.safehasattr(repo, 'journal'): | |
132 | oldmarks = bookmarks.bmstore(repo) |
|
131 | oldmarks = bookmarks.bmstore(repo) | |
133 | for mark, value in pycompat.iteritems(store): |
|
132 | for mark, value in pycompat.iteritems(store): | |
134 | oldvalue = oldmarks.get(mark, nullid) |
|
133 | oldvalue = oldmarks.get(mark, repo.nullid) | |
135 | if value != oldvalue: |
|
134 | if value != oldvalue: | |
136 | repo.journal.record(bookmarktype, mark, oldvalue, value) |
|
135 | repo.journal.record(bookmarktype, mark, oldvalue, value) | |
137 | return orig(store, fp) |
|
136 | return orig(store, fp) | |
138 |
|
137 | |||
139 |
|
138 | |||
140 | # shared repository support |
|
139 | # shared repository support | |
141 | def _readsharedfeatures(repo): |
|
140 | def _readsharedfeatures(repo): | |
142 | """A set of shared features for this repository""" |
|
141 | """A set of shared features for this repository""" | |
143 | try: |
|
142 | try: | |
144 | return set(repo.vfs.read(b'shared').splitlines()) |
|
143 | return set(repo.vfs.read(b'shared').splitlines()) | |
145 | except IOError as inst: |
|
144 | except IOError as inst: | |
146 | if inst.errno != errno.ENOENT: |
|
145 | if inst.errno != errno.ENOENT: | |
147 | raise |
|
146 | raise | |
148 | return set() |
|
147 | return set() | |
149 |
|
148 | |||
150 |
|
149 | |||
151 | def _mergeentriesiter(*iterables, **kwargs): |
|
150 | def _mergeentriesiter(*iterables, **kwargs): | |
152 | """Given a set of sorted iterables, yield the next entry in merged order |
|
151 | """Given a set of sorted iterables, yield the next entry in merged order | |
153 |
|
152 | |||
154 | Note that by default entries go from most recent to oldest. |
|
153 | Note that by default entries go from most recent to oldest. | |
155 | """ |
|
154 | """ | |
156 | order = kwargs.pop('order', max) |
|
155 | order = kwargs.pop('order', max) | |
157 | iterables = [iter(it) for it in iterables] |
|
156 | iterables = [iter(it) for it in iterables] | |
158 | # this tracks still active iterables; iterables are deleted as they are |
|
157 | # this tracks still active iterables; iterables are deleted as they are | |
159 | # exhausted, which is why this is a dictionary and why each entry also |
|
158 | # exhausted, which is why this is a dictionary and why each entry also | |
160 | # stores the key. Entries are mutable so we can store the next value each |
|
159 | # stores the key. Entries are mutable so we can store the next value each | |
161 | # time. |
|
160 | # time. | |
162 | iterable_map = {} |
|
161 | iterable_map = {} | |
163 | for key, it in enumerate(iterables): |
|
162 | for key, it in enumerate(iterables): | |
164 | try: |
|
163 | try: | |
165 | iterable_map[key] = [next(it), key, it] |
|
164 | iterable_map[key] = [next(it), key, it] | |
166 | except StopIteration: |
|
165 | except StopIteration: | |
167 | # empty entry, can be ignored |
|
166 | # empty entry, can be ignored | |
168 | pass |
|
167 | pass | |
169 |
|
168 | |||
170 | while iterable_map: |
|
169 | while iterable_map: | |
171 | value, key, it = order(pycompat.itervalues(iterable_map)) |
|
170 | value, key, it = order(pycompat.itervalues(iterable_map)) | |
172 | yield value |
|
171 | yield value | |
173 | try: |
|
172 | try: | |
174 | iterable_map[key][0] = next(it) |
|
173 | iterable_map[key][0] = next(it) | |
175 | except StopIteration: |
|
174 | except StopIteration: | |
176 | # this iterable is empty, remove it from consideration |
|
175 | # this iterable is empty, remove it from consideration | |
177 | del iterable_map[key] |
|
176 | del iterable_map[key] | |
178 |
|
177 | |||
179 |
|
178 | |||
180 | def wrappostshare(orig, sourcerepo, destrepo, **kwargs): |
|
179 | def wrappostshare(orig, sourcerepo, destrepo, **kwargs): | |
181 | """Mark this shared working copy as sharing journal information""" |
|
180 | """Mark this shared working copy as sharing journal information""" | |
182 | with destrepo.wlock(): |
|
181 | with destrepo.wlock(): | |
183 | orig(sourcerepo, destrepo, **kwargs) |
|
182 | orig(sourcerepo, destrepo, **kwargs) | |
184 | with destrepo.vfs(b'shared', b'a') as fp: |
|
183 | with destrepo.vfs(b'shared', b'a') as fp: | |
185 | fp.write(b'journal\n') |
|
184 | fp.write(b'journal\n') | |
186 |
|
185 | |||
187 |
|
186 | |||
188 | def unsharejournal(orig, ui, repo, repopath): |
|
187 | def unsharejournal(orig, ui, repo, repopath): | |
189 | """Copy shared journal entries into this repo when unsharing""" |
|
188 | """Copy shared journal entries into this repo when unsharing""" | |
190 | if ( |
|
189 | if ( | |
191 | repo.path == repopath |
|
190 | repo.path == repopath | |
192 | and repo.shared() |
|
191 | and repo.shared() | |
193 | and util.safehasattr(repo, 'journal') |
|
192 | and util.safehasattr(repo, 'journal') | |
194 | ): |
|
193 | ): | |
195 | sharedrepo = hg.sharedreposource(repo) |
|
194 | sharedrepo = hg.sharedreposource(repo) | |
196 | sharedfeatures = _readsharedfeatures(repo) |
|
195 | sharedfeatures = _readsharedfeatures(repo) | |
197 | if sharedrepo and sharedfeatures > {b'journal'}: |
|
196 | if sharedrepo and sharedfeatures > {b'journal'}: | |
198 | # there is a shared repository and there are shared journal entries |
|
197 | # there is a shared repository and there are shared journal entries | |
199 | # to copy. Move shared data over from source to destination but |

198 | # to copy. Move shared data over from source to destination but | |
200 | # move the local file first |
|
199 | # move the local file first | |
201 | if repo.vfs.exists(b'namejournal'): |
|
200 | if repo.vfs.exists(b'namejournal'): | |
202 | journalpath = repo.vfs.join(b'namejournal') |
|
201 | journalpath = repo.vfs.join(b'namejournal') | |
203 | util.rename(journalpath, journalpath + b'.bak') |
|
202 | util.rename(journalpath, journalpath + b'.bak') | |
204 | storage = repo.journal |
|
203 | storage = repo.journal | |
205 | local = storage._open( |
|
204 | local = storage._open( | |
206 | repo.vfs, filename=b'namejournal.bak', _newestfirst=False |
|
205 | repo.vfs, filename=b'namejournal.bak', _newestfirst=False | |
207 | ) |
|
206 | ) | |
208 | shared = ( |
|
207 | shared = ( | |
209 | e |
|
208 | e | |
210 | for e in storage._open(sharedrepo.vfs, _newestfirst=False) |
|
209 | for e in storage._open(sharedrepo.vfs, _newestfirst=False) | |
211 | if sharednamespaces.get(e.namespace) in sharedfeatures |
|
210 | if sharednamespaces.get(e.namespace) in sharedfeatures | |
212 | ) |
|
211 | ) | |
213 | for entry in _mergeentriesiter(local, shared, order=min): |
|
212 | for entry in _mergeentriesiter(local, shared, order=min): | |
214 | storage._write(repo.vfs, entry) |
|
213 | storage._write(repo.vfs, entry) | |
215 |
|
214 | |||
216 | return orig(ui, repo, repopath) |
|
215 | return orig(ui, repo, repopath) | |
217 |
|
216 | |||
218 |
|
217 | |||
219 | class journalentry( |
|
218 | class journalentry( | |
220 | collections.namedtuple( |
|
219 | collections.namedtuple( | |
221 | 'journalentry', |
|
220 | 'journalentry', | |
222 | 'timestamp user command namespace name oldhashes newhashes', |
|
221 | 'timestamp user command namespace name oldhashes newhashes', | |
223 | ) |
|
222 | ) | |
224 | ): |
|
223 | ): | |
225 | """Individual journal entry |
|
224 | """Individual journal entry | |
226 |
|
225 | |||
227 | * timestamp: a mercurial (time, timezone) tuple |
|
226 | * timestamp: a mercurial (time, timezone) tuple | |
228 | * user: the username that ran the command |
|
227 | * user: the username that ran the command | |
229 | * namespace: the entry namespace, an opaque string |
|
228 | * namespace: the entry namespace, an opaque string | |
230 | * name: the name of the changed item, opaque string with meaning in the |
|
229 | * name: the name of the changed item, opaque string with meaning in the | |
231 | namespace |
|
230 | namespace | |
232 | * command: the hg command that triggered this record |
|
231 | * command: the hg command that triggered this record | |
233 | * oldhashes: a tuple of one or more binary hashes for the old location |
|
232 | * oldhashes: a tuple of one or more binary hashes for the old location | |
234 | * newhashes: a tuple of one or more binary hashes for the new location |
|
233 | * newhashes: a tuple of one or more binary hashes for the new location | |
235 |
|
234 | |||
236 | Handles serialisation from and to the storage format. Fields are |
|
235 | Handles serialisation from and to the storage format. Fields are | |
237 | separated by newlines, hashes are written out in hex separated by commas, |
|
236 | separated by newlines, hashes are written out in hex separated by commas, | |
238 | timestamp and timezone are separated by a space. |
|
237 | timestamp and timezone are separated by a space. | |
239 |
|
238 | |||
240 | """ |
|
239 | """ | |
241 |
|
240 | |||
242 | @classmethod |
|
241 | @classmethod | |
243 | def fromstorage(cls, line): |
|
242 | def fromstorage(cls, line): | |
244 | ( |
|
243 | ( | |
245 | time, |
|
244 | time, | |
246 | user, |
|
245 | user, | |
247 | command, |
|
246 | command, | |
248 | namespace, |
|
247 | namespace, | |
249 | name, |
|
248 | name, | |
250 | oldhashes, |
|
249 | oldhashes, | |
251 | newhashes, |
|
250 | newhashes, | |
252 | ) = line.split(b'\n') |
|
251 | ) = line.split(b'\n') | |
253 | timestamp, tz = time.split() |
|
252 | timestamp, tz = time.split() | |
254 | timestamp, tz = float(timestamp), int(tz) |
|
253 | timestamp, tz = float(timestamp), int(tz) | |
255 | oldhashes = tuple(bin(hash) for hash in oldhashes.split(b',')) |
|
254 | oldhashes = tuple(bin(hash) for hash in oldhashes.split(b',')) | |
256 | newhashes = tuple(bin(hash) for hash in newhashes.split(b',')) |
|
255 | newhashes = tuple(bin(hash) for hash in newhashes.split(b',')) | |
257 | return cls( |
|
256 | return cls( | |
258 | (timestamp, tz), |
|
257 | (timestamp, tz), | |
259 | user, |
|
258 | user, | |
260 | command, |
|
259 | command, | |
261 | namespace, |
|
260 | namespace, | |
262 | name, |
|
261 | name, | |
263 | oldhashes, |
|
262 | oldhashes, | |
264 | newhashes, |
|
263 | newhashes, | |
265 | ) |
|
264 | ) | |
266 |
|
265 | |||
267 | def __bytes__(self): |
|
266 | def __bytes__(self): | |
268 | """bytes representation for storage""" |
|
267 | """bytes representation for storage""" | |
269 | time = b' '.join(map(pycompat.bytestr, self.timestamp)) |
|
268 | time = b' '.join(map(pycompat.bytestr, self.timestamp)) | |
270 | oldhashes = b','.join([hex(hash) for hash in self.oldhashes]) |
|
269 | oldhashes = b','.join([hex(hash) for hash in self.oldhashes]) | |
271 | newhashes = b','.join([hex(hash) for hash in self.newhashes]) |
|
270 | newhashes = b','.join([hex(hash) for hash in self.newhashes]) | |
272 | return b'\n'.join( |
|
271 | return b'\n'.join( | |
273 | ( |
|
272 | ( | |
274 | time, |
|
273 | time, | |
275 | self.user, |
|
274 | self.user, | |
276 | self.command, |
|
275 | self.command, | |
277 | self.namespace, |
|
276 | self.namespace, | |
278 | self.name, |
|
277 | self.name, | |
279 | oldhashes, |
|
278 | oldhashes, | |
280 | newhashes, |
|
279 | newhashes, | |
281 | ) |
|
280 | ) | |
282 | ) |
|
281 | ) | |
283 |
|
282 | |||
284 | __str__ = encoding.strmethod(__bytes__) |
|
283 | __str__ = encoding.strmethod(__bytes__) | |
285 |
|
284 | |||
286 |
|
285 | |||
287 | class journalstorage(object): |
|
286 | class journalstorage(object): | |
288 | """Storage for journal entries |
|
287 | """Storage for journal entries | |
289 |
|
288 | |||
290 | Entries are divided over two files; one with entries that pertain to the |
|
289 | Entries are divided over two files; one with entries that pertain to the | |
291 | local working copy *only*, and one with entries that are shared across |
|
290 | local working copy *only*, and one with entries that are shared across | |
292 | multiple working copies when shared using the share extension. |
|
291 | multiple working copies when shared using the share extension. | |
293 |
|
292 | |||
294 | Entries are stored with NUL bytes as separators. See the journalentry |
|
293 | Entries are stored with NUL bytes as separators. See the journalentry | |
295 | class for the per-entry structure. |
|
294 | class for the per-entry structure. | |
296 |
|
295 | |||
297 | The file format starts with an integer version, delimited by a NUL. |
|
296 | The file format starts with an integer version, delimited by a NUL. | |
298 |
|
297 | |||
299 | This storage uses a dedicated lock; this makes it easier to avoid issues |
|
298 | This storage uses a dedicated lock; this makes it easier to avoid issues | |
300 | with entries that are added while the regular wlock is unlocked (e.g. |

299 | with entries that are added while the regular wlock is unlocked (e.g. | |
301 | the dirstate). |
|
300 | the dirstate). | |
302 |
|
301 | |||
303 | """ |
|
302 | """ | |
304 |
|
303 | |||
305 | _currentcommand = () |
|
304 | _currentcommand = () | |
306 | _lockref = None |
|
305 | _lockref = None | |
307 |
|
306 | |||
308 | def __init__(self, repo): |
|
307 | def __init__(self, repo): | |
309 | self.user = procutil.getuser() |
|
308 | self.user = procutil.getuser() | |
310 | self.ui = repo.ui |
|
309 | self.ui = repo.ui | |
311 | self.vfs = repo.vfs |
|
310 | self.vfs = repo.vfs | |
312 |
|
311 | |||
313 | # is this working copy using a shared storage? |
|
312 | # is this working copy using a shared storage? | |
314 | self.sharedfeatures = self.sharedvfs = None |
|
313 | self.sharedfeatures = self.sharedvfs = None | |
315 | if repo.shared(): |
|
314 | if repo.shared(): | |
316 | features = _readsharedfeatures(repo) |
|
315 | features = _readsharedfeatures(repo) | |
317 | sharedrepo = hg.sharedreposource(repo) |
|
316 | sharedrepo = hg.sharedreposource(repo) | |
318 | if sharedrepo is not None and b'journal' in features: |
|
317 | if sharedrepo is not None and b'journal' in features: | |
319 | self.sharedvfs = sharedrepo.vfs |
|
318 | self.sharedvfs = sharedrepo.vfs | |
320 | self.sharedfeatures = features |
|
319 | self.sharedfeatures = features | |
321 |
|
320 | |||
322 | # track the current command for recording in journal entries |
|
321 | # track the current command for recording in journal entries | |
323 | @property |
|
322 | @property | |
324 | def command(self): |
|
323 | def command(self): | |
325 | commandstr = b' '.join( |
|
324 | commandstr = b' '.join( | |
326 | map(procutil.shellquote, journalstorage._currentcommand) |
|
325 | map(procutil.shellquote, journalstorage._currentcommand) | |
327 | ) |
|
326 | ) | |
328 | if b'\n' in commandstr: |
|
327 | if b'\n' in commandstr: | |
329 | # truncate multi-line commands |
|
328 | # truncate multi-line commands | |
330 | commandstr = commandstr.partition(b'\n')[0] + b' ...' |
|
329 | commandstr = commandstr.partition(b'\n')[0] + b' ...' | |
331 | return commandstr |
|
330 | return commandstr | |
332 |
|
331 | |||
333 | @classmethod |
|
332 | @classmethod | |
334 | def recordcommand(cls, *fullargs): |
|
333 | def recordcommand(cls, *fullargs): | |
335 | """Set the current hg arguments, stored with recorded entries""" |
|
334 | """Set the current hg arguments, stored with recorded entries""" | |
336 | # Set the current command on the class because we may have started |
|
335 | # Set the current command on the class because we may have started | |
337 | # with a non-local repo (cloning for example). |
|
336 | # with a non-local repo (cloning for example). | |
338 | cls._currentcommand = fullargs |
|
337 | cls._currentcommand = fullargs | |
339 |
|
338 | |||
340 | def _currentlock(self, lockref): |
|
339 | def _currentlock(self, lockref): | |
341 | """Returns the lock if it's held, or None if it's not. |
|
340 | """Returns the lock if it's held, or None if it's not. | |
342 |
|
341 | |||
343 | (This is copied from the localrepo class) |
|
342 | (This is copied from the localrepo class) | |
344 | """ |
|
343 | """ | |
345 | if lockref is None: |
|
344 | if lockref is None: | |
346 | return None |
|
345 | return None | |
347 | l = lockref() |
|
346 | l = lockref() | |
348 | if l is None or not l.held: |
|
347 | if l is None or not l.held: | |
349 | return None |
|
348 | return None | |
350 | return l |
|
349 | return l | |
351 |
|
350 | |||
352 | def jlock(self, vfs): |
|
351 | def jlock(self, vfs): | |
353 | """Create a lock for the journal file""" |
|
352 | """Create a lock for the journal file""" | |
354 | if self._currentlock(self._lockref) is not None: |
|
353 | if self._currentlock(self._lockref) is not None: | |
355 | raise error.Abort(_(b'journal lock does not support nesting')) |
|
354 | raise error.Abort(_(b'journal lock does not support nesting')) | |
356 | desc = _(b'journal of %s') % vfs.base |
|
355 | desc = _(b'journal of %s') % vfs.base | |
357 | try: |
|
356 | try: | |
358 | l = lock.lock(vfs, b'namejournal.lock', 0, desc=desc) |
|
357 | l = lock.lock(vfs, b'namejournal.lock', 0, desc=desc) | |
359 | except error.LockHeld as inst: |
|
358 | except error.LockHeld as inst: | |
360 | self.ui.warn( |
|
359 | self.ui.warn( | |
361 | _(b"waiting for lock on %s held by %r\n") % (desc, inst.locker) |
|
360 | _(b"waiting for lock on %s held by %r\n") % (desc, inst.locker) | |
362 | ) |
|
361 | ) | |
363 | # default to 600 seconds timeout |
|
362 | # default to 600 seconds timeout | |
364 | l = lock.lock( |
|
363 | l = lock.lock( | |
365 | vfs, |
|
364 | vfs, | |
366 | b'namejournal.lock', |
|
365 | b'namejournal.lock', | |
367 | self.ui.configint(b"ui", b"timeout"), |
|
366 | self.ui.configint(b"ui", b"timeout"), | |
368 | desc=desc, |
|
367 | desc=desc, | |
369 | ) |
|
368 | ) | |
370 | self.ui.warn(_(b"got lock after %s seconds\n") % l.delay) |
|
369 | self.ui.warn(_(b"got lock after %s seconds\n") % l.delay) | |
371 | self._lockref = weakref.ref(l) |
|
370 | self._lockref = weakref.ref(l) | |
372 | return l |
|
371 | return l | |
373 |
|
372 | |||
374 | def record(self, namespace, name, oldhashes, newhashes): |
|
373 | def record(self, namespace, name, oldhashes, newhashes): | |
375 | """Record a new journal entry |
|
374 | """Record a new journal entry | |
376 |
|
375 | |||
377 | * namespace: an opaque string; this can be used to filter on the type |
|
376 | * namespace: an opaque string; this can be used to filter on the type | |
378 | of recorded entries. |
|
377 | of recorded entries. | |
379 | * name: the name defining this entry; for bookmarks, this is the |
|
378 | * name: the name defining this entry; for bookmarks, this is the | |
380 | bookmark name. Can be filtered on when retrieving entries. |
|
379 | bookmark name. Can be filtered on when retrieving entries. | |
381 | * oldhashes and newhashes: each a single binary hash, or a list of |
|
380 | * oldhashes and newhashes: each a single binary hash, or a list of | |
382 | binary hashes. These represent the old and new position of the named |
|
381 | binary hashes. These represent the old and new position of the named | |
383 | item. |
|
382 | item. | |
384 |
|
383 | |||
385 | """ |
|
384 | """ | |
386 | if not isinstance(oldhashes, list): |
|
385 | if not isinstance(oldhashes, list): | |
387 | oldhashes = [oldhashes] |
|
386 | oldhashes = [oldhashes] | |
388 | if not isinstance(newhashes, list): |
|
387 | if not isinstance(newhashes, list): | |
389 | newhashes = [newhashes] |
|
388 | newhashes = [newhashes] | |
390 |
|
389 | |||
391 | entry = journalentry( |
|
390 | entry = journalentry( | |
392 | dateutil.makedate(), |
|
391 | dateutil.makedate(), | |
393 | self.user, |
|
392 | self.user, | |
394 | self.command, |
|
393 | self.command, | |
395 | namespace, |
|
394 | namespace, | |
396 | name, |
|
395 | name, | |
397 | oldhashes, |
|
396 | oldhashes, | |
398 | newhashes, |
|
397 | newhashes, | |
399 | ) |
|
398 | ) | |
400 |
|
399 | |||
401 | vfs = self.vfs |
|
400 | vfs = self.vfs | |
402 | if self.sharedvfs is not None: |
|
401 | if self.sharedvfs is not None: | |
403 | # write to the shared repository if this feature is being |
|
402 | # write to the shared repository if this feature is being | |
404 | # shared between working copies. |
|
403 | # shared between working copies. | |
405 | if sharednamespaces.get(namespace) in self.sharedfeatures: |
|
404 | if sharednamespaces.get(namespace) in self.sharedfeatures: | |
406 | vfs = self.sharedvfs |
|
405 | vfs = self.sharedvfs | |
407 |
|
406 | |||
408 | self._write(vfs, entry) |
|
407 | self._write(vfs, entry) | |
409 |
|
408 | |||
410 | def _write(self, vfs, entry): |
|
409 | def _write(self, vfs, entry): | |
411 | with self.jlock(vfs): |
|
410 | with self.jlock(vfs): | |
412 | # open file in append mode to ensure it is created if missing |

411 | # open file in append mode to ensure it is created if missing | |
413 | with vfs(b'namejournal', mode=b'a+b') as f: |
|
412 | with vfs(b'namejournal', mode=b'a+b') as f: | |
414 | f.seek(0, os.SEEK_SET) |
|
413 | f.seek(0, os.SEEK_SET) | |
415 | # Read just enough bytes to get a version number (up to 2 |
|
414 | # Read just enough bytes to get a version number (up to 2 | |
416 | # digits plus separator) |
|
415 | # digits plus separator) | |
417 | version = f.read(3).partition(b'\0')[0] |
|
416 | version = f.read(3).partition(b'\0')[0] | |
418 | if version and version != b"%d" % storageversion: |
|
417 | if version and version != b"%d" % storageversion: | |
419 | # different version of the storage. Exit early (and not |
|
418 | # different version of the storage. Exit early (and not | |
420 | # write anything) if this is not a version we can handle or |
|
419 | # write anything) if this is not a version we can handle or | |
421 | # the file is corrupt. In future, perhaps rotate the file |
|
420 | # the file is corrupt. In future, perhaps rotate the file | |
422 | # instead? |
|
421 | # instead? | |
423 | self.ui.warn( |
|
422 | self.ui.warn( | |
424 | _(b"unsupported journal file version '%s'\n") % version |
|
423 | _(b"unsupported journal file version '%s'\n") % version | |
425 | ) |
|
424 | ) | |
426 | return |
|
425 | return | |
427 | if not version: |
|
426 | if not version: | |
428 | # empty file, write version first |
|
427 | # empty file, write version first | |
429 | f.write((b"%d" % storageversion) + b'\0') |
|
428 | f.write((b"%d" % storageversion) + b'\0') | |
430 | f.seek(0, os.SEEK_END) |
|
429 | f.seek(0, os.SEEK_END) | |
431 | f.write(bytes(entry) + b'\0') |
|
430 | f.write(bytes(entry) + b'\0') | |
432 |
|
431 | |||

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        if namespace is not None:
            namespace = stringutil.stringmatcher(namespace)[-1]
        if name is not None:
            name = stringutil.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e
            for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures
        )
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename=b'namejournal', _newestfirst=True):
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        lines = raw.split(b'\0')
        version = lines and lines[0]
        if version != b"%d" % storageversion:
            version = version or _(b'not available')
            raise error.Abort(_(b"unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)
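

# Illustrative sketch (not part of this module): the namejournal format used
# by _write() and _open() above is just a version field followed by
# NUL-separated records, read back newest first.  The helpers below
# reimplement that layout with the stdlib only; the names and the b'0'
# version value are assumptions for the example, not Mercurial APIs.
import io


def _example_append(path, record, version=b'0'):
    # mirror _write(): create the file with a leading version field on first
    # use, then append the record followed by a NUL separator
    with open(path, 'a+b') as f:
        f.seek(0, io.SEEK_SET)
        head = f.read(3).partition(b'\0')[0]
        if head and head != version:
            raise ValueError('unsupported journal file version %r' % head)
        if not head:
            f.write(version + b'\0')
        f.write(record + b'\0')


def _example_entries(path, version=b'0'):
    # mirror _open(): skip the leading version field, yield records newest
    # first, ignoring empty trailing fields
    with open(path, 'rb') as f:
        raw = f.read()
    fields = raw.split(b'\0')
    if fields[0] != version:
        raise ValueError('unknown journal file version %r' % fields[0])
    for record in reversed(fields[1:]):
        if record:
            yield record


# e.g. _example_append('nj', b'entry-one'); list(_example_entries('nj'))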


# journal reading
# log options that don't make sense for journal
_ignoreopts = (b'no-merges', b'graph')


@command(
    b'journal',
    [
        (b'', b'all', None, b'show history for all names'),
        (b'c', b'commits', None, b'show commit metadata'),
    ]
    + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
    b'[OPTION]... [BOOKMARKNAME]',
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations of the
    working copy are shown. Passing a bookmark name will show all the
    previous positions of that bookmark. Use the --all switch to show
    previous locations for all bookmarks and the working copy; each line
    will then include the bookmark name, or '.' for the working copy, as
    well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    opts = pycompat.byteskwargs(opts)
    name = b'.'
    if opts.get(b'all'):
        if args:
            raise error.Abort(
                _(b"You can't combine --all and filtering on a name")
            )
        name = None
    if args:
        name = args[0]

    fm = ui.formatter(b'journal', opts)

    def formatnodes(nodes):
        return fm.formatlist(map(fm.hexfunc, nodes), name=b'node', sep=b',')

    if opts.get(b"template") != b"json":
        if name is None:
            displayname = _(b'the working copy and bookmarks')
        else:
            displayname = b"'%s'" % name
        ui.status(_(b"previous locations of %s:\n") % displayname)

    limit = logcmdutil.getlimit(opts)
    entry = None
    ui.pager(b'journal')
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break

        fm.startitem()
        fm.condwrite(
            ui.verbose, b'oldnodes', b'%s -> ', formatnodes(entry.oldhashes)
        )
        fm.write(b'newnodes', b'%s', formatnodes(entry.newhashes))
        fm.condwrite(ui.verbose, b'user', b' %-8s', entry.user)
        fm.condwrite(
            opts.get(b'all') or name.startswith(b're:'),
            b'name',
            b' %-8s',
            entry.name,
        )

        fm.condwrite(
            ui.verbose,
            b'date',
            b' %s',
            fm.formatdate(entry.timestamp, b'%Y-%m-%d %H:%M %1%2'),
        )
        fm.write(b'command', b' %s\n', entry.command)

        if opts.get(b"commits"):
            if fm.isplain():
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            else:
                displayer = logcmdutil.changesetformatter(
                    ui, repo, fm.nested(b'changesets'), diffopts=opts
                )
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    fm.plain(b"%s\n\n" % pycompat.bytestr(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_(b"no recorded locations\n"))
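

# Illustrative sketch (not part of this module): the `re:`/`literal:` prefix
# convention described in the docstrings above is implemented by
# stringutil.stringmatcher in Mercurial; a minimal stand-alone equivalent
# looks like this.  The function name is made up for the example.
import re as _re


def _example_stringmatcher(pattern):
    # return a predicate matching the way journal names are filtered:
    # 're:<regexp>' matches as a regular expression, 'literal:<name>'
    # (or no prefix at all) matches exactly
    if pattern.startswith(b're:'):
        regexp = _re.compile(pattern[3:])
        return lambda s: regexp.match(s) is not None
    if pattern.startswith(b'literal:'):
        pattern = pattern[8:]
    return lambda s: s == pattern


# e.g. _example_stringmatcher(b're:feature-.*')(b'feature-x') -> True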

@@ -1,182 +1,183 @@

# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''base class for store implementations and store-related utility code'''
from __future__ import absolute_import

from mercurial.i18n import _

from mercurial.node import short
from mercurial import util
from mercurial.utils import (
    urlutil,
)

from . import lfutil


class StoreError(Exception):
    """Raised when there is a problem getting files from or putting
    files to a central store."""

    def __init__(self, filename, hash, url, detail):
        self.filename = filename
        self.hash = hash
        self.url = url
        self.detail = detail

    def longmessage(self):
        return _(b"error getting id %s from url %s for file %s: %s\n") % (
            self.hash,
            urlutil.hidepassword(self.url),
            self.filename,
            self.detail,
        )

    def __str__(self):
        return b"%s: %s" % (urlutil.hidepassword(self.url), self.detail)


class basestore(object):
    def __init__(self, ui, repo, url):
        self.ui = ui
        self.repo = repo
        self.url = url

    def put(self, source, hash):
        '''Put source file into the store so it can be retrieved by hash.'''
        raise NotImplementedError(b'abstract method')

    def exists(self, hashes):
        """Check to see if the store contains the given hashes. Given an
        iterable of hashes it returns a mapping from hash to bool."""
        raise NotImplementedError(b'abstract method')

    def get(self, files):
        """Get the specified largefiles from the store and write to local
        files under repo.root. files is a list of (filename, hash)
        tuples. Return (success, missing), lists of files successfully
        downloaded and those not found in the store. success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get. (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)"""
        success = []
        missing = []
        ui = self.ui

        at = 0
        available = self.exists({hash for (_filename, hash) in files})
        with ui.makeprogress(
            _(b'getting largefiles'), unit=_(b'files'), total=len(files)
        ) as progress:
            for filename, hash in files:
                progress.update(at)
                at += 1
                ui.note(_(b'getting %s:%s\n') % (filename, hash))

                if not available.get(hash):
                    ui.warn(
                        _(b'%s: largefile %s not available from %s\n')
                        % (filename, hash, urlutil.hidepassword(self.url))
                    )
                    missing.append(filename)
                    continue

                if self._gethash(filename, hash):
                    success.append((filename, hash))
                else:
                    missing.append(filename)

        return (success, missing)

    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        """
        util.makedirs(lfutil.storepath(self.repo, b''))
        storefilename = lfutil.storepath(self.repo, hash)

        tmpname = storefilename + b'.tmp'
        with util.atomictempfile(
            tmpname, createmode=self.repo.store.createmode
        ) as tmpfile:
            try:
                gothash = self._getfile(tmpfile, filename, hash)
            except StoreError as err:
                self.ui.warn(err.longmessage())
                gothash = b""

        if gothash != hash:
            if gothash != b"":
                self.ui.warn(
                    _(b'%s: data corruption (expected %s, got %s)\n')
                    % (filename, hash, gothash)
                )
            util.unlink(tmpname)
            return False

        util.rename(tmpname, storefilename)
        lfutil.linktousercache(self.repo, hash)
        return True

    def verify(self, revs, contents=False):
        """Verify the existence (and, optionally, contents) of every big
        file revision referenced by every changeset in revs.
        Return 0 if all is well, non-zero on any errors."""

        self.ui.status(
            _(b'searching %d changesets for largefiles\n') % len(revs)
        )
        verified = set()  # set of (filename, filenode) tuples
        filestocheck = []  # list of (cset, filename, expectedhash)
        for rev in revs:
            cctx = self.repo[rev]
            cset = b"%d:%s" % (cctx.rev(), short(cctx.node()))

            for standin in cctx:
                filename = lfutil.splitstandin(standin)
                if filename:
                    fctx = cctx[standin]
                    key = (filename, fctx.filenode())
                    if key not in verified:
                        verified.add(key)
                        expectedhash = lfutil.readasstandin(fctx)
                        filestocheck.append((cset, filename, expectedhash))

        failed = self._verifyfiles(contents, filestocheck)

        numrevs = len(verified)
        numlfiles = len({fname for (fname, fnode) in verified})
        if contents:
            self.ui.status(
                _(b'verified contents of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles)
            )
        else:
            self.ui.status(
                _(b'verified existence of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles)
            )
        return int(failed)

    def _getfile(self, tmpfile, filename, hash):
        """Fetch one revision of one file from the store and write it
        to tmpfile. Compute the hash of the file on-the-fly as it
        downloads and return the hash. Close tmpfile. Raise
        StoreError if unable to download the file (e.g. it does not
        exist in the store)."""
        raise NotImplementedError(b'abstract method')

    def _verifyfiles(self, contents, filestocheck):
        """Perform the actual verification of files in the store.
        'contents' controls verification of content hash.
        'filestocheck' is a list of files to check.
        Returns _true_ if any problems are found!
        """
        raise NotImplementedError(b'abstract method')
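

# Illustrative sketch (not part of largefiles): a minimal stand-alone store
# honouring the same contract as basestore above: files are addressed by
# their SHA-1 hex digest, exists() maps hash to bool, and retrieval hashes
# the content while streaming so the caller can verify it.  The class and
# its directory layout are made up for the example; the extension's real
# stores (local, remote, ...) live in other modules.
import hashlib
import os
import shutil


class _exampledirstore(object):
    def __init__(self, root):
        self.root = root

    def _path(self, hash):
        # one file per content hash, directly under the store root
        return os.path.join(self.root, hash.decode('ascii'))

    def put(self, source, hash):
        # copy the file into the store under its content hash
        os.makedirs(self.root, exist_ok=True)
        shutil.copyfile(source, self._path(hash))

    def exists(self, hashes):
        return {h: os.path.exists(self._path(h)) for h in hashes}

    def getfile(self, outfile, hash):
        # stream the stored file into outfile, hashing as we go, and return
        # the digest actually seen; the caller compares it to 'hash', just
        # as _gethash() above compares the result of _getfile()
        sha = hashlib.sha1()
        with open(self._path(hash), 'rb') as f:
            while True:
                chunk = f.read(1 << 20)
                if not chunk:
                    break
                sha.update(chunk)
                outfile.write(chunk)
        return sha.hexdigest().encode('ascii')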

@@ -1,668 +1,667 @@

# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''High-level command function for lfconvert, plus the cmdtable.'''
from __future__ import absolute_import

import errno
import os
import shutil

from mercurial.i18n import _
from mercurial.node import (
    bin,
    hex,
)

from mercurial import (
    cmdutil,
    context,
    error,
    exthelper,
    hg,
    lock,
    match as matchmod,
    pycompat,
    scmutil,
    util,
)
from mercurial.utils import hashutil

from ..convert import (
    convcmd,
    filemap,
)

from . import lfutil, storefactory

release = lock.release

# -- Commands ----------------------------------------------------------

eh = exthelper.exthelper()


@eh.command(
    b'lfconvert',
    [
        (
            b's',
            b'size',
            b'',
            _(b'minimum size (MB) for files to be converted as largefiles'),
            b'SIZE',
        ),
        (
            b'',
            b'to-normal',
            False,
            _(b'convert from a largefiles repo to a normal repo'),
        ),
    ],
    _(b'hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True,
)
def lfconvert(ui, src, dest, *pats, **opts):
    """convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all."""

    opts = pycompat.byteskwargs(opts)
    if opts[b'to_normal']:
        tolfile = False
    else:
        tolfile = True
        size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_(b'initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source. The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (
            rsrc[ctx]
            for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
        )
        revmap = {rsrc.nullid: rdst.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is being
            # converted. Don't need to lock src because we are just reading
            # from its history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, b'patterns')
            if pats:
                matcher = matchmod.match(rsrc.root, b'', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            with ui.makeprogress(
                _(b'converting revisions'),
                unit=_(b'revisions'),
                total=rsrc[b'tip'].rev(),
            ) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(
                        rsrc,
                        rdst,
                        ctx,
                        revmap,
                        lfiles,
                        normalfiles,
                        matcher,
                        size,
                        lfiletohash,
                    )

            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add(b'largefiles')
                scmutil.writereporequirements(rdst)
        else:

            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = b'.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (
                        not realname.startswith(lfutil.shortnameslash)
                        or f[0] is None
                    ):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(
                            _(b"missing largefile for '%s' in %s")
                            % (realname, realrev)
                        )
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(
                        ui, src, dest, revmapfile, opts
                    )

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_(b"all largefiles must be present locally"))

            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(
                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
                )
            finally:
                convcmd.converter = orig
            success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
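

# Illustrative usage of the command defined above (the repository paths are
# examples, not part of the extension):
#
#   $ hg lfconvert --size 10 bigrepo largefiles-repo
#   $ hg lfconvert --to-normal largefiles-repo normal-repo
#
# The first form converts every file whose first revision is at least 10 MB
# (or that matches a configured pattern) into a largefile; the second turns
# a largefiles repository back into a plain one.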


def _lfconvert_addchangeset(
    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
):
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.copysource()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a list
                    # and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed in lfiles
                islfile |= renamedlfile
                if b'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _(b'renamed/copied largefile %s becomes symlink')
                            % f
                        )
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if b'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(
                            _(b'largefile %s becomes symlink') % f
                        )

                # largefile was modified, update standins
                m = hashutil.sha1(b'')
                m.update(ctx[f].data())
                hash = hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = b'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash, executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.copysource()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed)

            return context.memfilectx(
                repo,
                memctx,
                f,
                lfiletohash[srcfname] + b'\n',
                b'l' in fctx.flags(),
                b'x' in fctx.flags(),
                renamed,
            )
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)


def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    mctx = context.memctx(
        rdst,
        parents,
        ctx.description(),
        dstfiles,
        getfilectx,
        ctx.user(),
        ctx.date(),
        ctx.extra(),
    )
    ret = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()


# Generate list of changed files
def _getchangedfiles(ctx, parents):
    files = set(ctx.files())
    if ctx.repo().nullid not in parents:
        mc = ctx.manifest()
        for pctx in ctx.parents():
            for fn in pctx.manifest().diff(mc):
                files.add(fn)
    return files


# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(ctx.repo().nullid)
    return parents
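

# Illustrative sketch (not part of the extension): the revmap used above is
# just a dict from source node to converted node, seeded with the null node
# so unconverted parents resolve, and every parent list is padded to exactly
# two entries.  A stand-alone rendering of that bookkeeping, with made-up
# 20-byte node values:
_NULL = b'\0' * 20


def _example_convertparents(src_parent_nodes, revmap):
    # map each already-converted source parent through revmap, then pad
    # with the null node so the result always has two entries
    parents = [revmap[n] for n in src_parent_nodes]
    while len(parents) < 2:
        parents.append(_NULL)
    return parents


# e.g. with revmap = {_NULL: _NULL, b'a' * 20: b'b' * 20}:
#   _example_convertparents([b'a' * 20], revmap) == [b'b' * 20, _NULL]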


# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.copysource()

    data = fctx.data()
    if f == b'.hgtags':
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(
        repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
    )


# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(b' ', 1)
        except ValueError:
            ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = bin(id)
        except TypeError:
            ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newdata.append(b'%s %s\n' % (hex(revmap[newid]), name))
        except KeyError:
            ui.warn(_(b'no mapping for id %s\n') % id)
            continue
    return b''.join(newdata)
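

# Illustrative sketch (not part of the extension): _converttags above
# rewrites each '<hex node> <tag name>' line of a .hgtags blob through the
# revmap.  A stand-alone round trip with the stdlib, using a toy 20-byte
# revmap; the function name is made up for the example.
import binascii


def _example_converttags(revmap, data):
    # keep only lines whose node parses and has a mapping, mirroring the
    # warn-and-skip behaviour above
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(b' ', 1)
            newid = binascii.unhexlify(id)
        except ValueError:  # malformed line or node id: skipped
            continue
        if newid in revmap:
            newdata.append(binascii.hexlify(revmap[newid]) + b' ' + name + b'\n')
    return b''.join(newdata)


# e.g. revmap = {b'\x11' * 20: b'\x22' * 20} rewrites the line
#   b'1111...11 v1.0' to b'2222...22 v1.0'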


def _islfile(file, ctx, matcher, size):
    """Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size."""
    # never store special .hg* files as largefiles
    if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
        return False
    if matcher and matcher(file):
        return True
    try:
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        return False
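

# Illustrative sketch (not part of the extension): the classification rule
# above reduces to "matches a pattern, or the first revision is at least
# SIZE megabytes".  A stand-alone version over plain paths and byte counts,
# with fnmatch standing in for Mercurial's matcher:
import fnmatch


def _example_islfile(path, nbytes, patterns, size_mb):
    # special .hg* files are never largefiles, mirroring _islfile above
    if path in (b'.hgtags', b'.hgignore', b'.hgsigs'):
        return False
    if any(fnmatch.fnmatch(path, pat) for pat in patterns):
        return True
    return nbytes >= size_mb * 1024 * 1024


# e.g. _example_islfile(b'assets/video.bin', 50 << 20, [b'*.iso'], 10) -> True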
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    files = [h for h in files if not retval[h]]
    ui.debug(b"%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(
        _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
    ) as progress:
        for hash in files:
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(
                    _(
                        b'largefile %s missing from store'
                        b' (needs to be uploaded)'
                    )
                    % hash
                )
            # XXX check for errors here
            store.put(source, hash)
            at += 1

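# Note (added): store.exists() above returns a mapping of {hash: bool}, so a
# single batched "statlfile" round-trip lets uploadlfiles() drop every file
# the central store already has before any data is transferred.
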
def verifylfiles(ui, repo, all=False, contents=False):
    """Verify that every largefile revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository."""
    if all:
        revs = repo.revs(b'all()')
    else:
        revs = [b'.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)

def cachelfiles(ui, repo, node, filelist=None):
    """cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing).  cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found."""
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)
    toget = []

    ctx = repo[node]
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except IOError as err:
            if err.errno == errno.ENOENT:
                continue  # node must be None and standin wasn't found in wctx
            raise
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = storefactory.openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])

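# Note (added): cachelfiles() returns a (cached, missing) pair of lists;
# downloadlfiles() below simply sums their lengths across every revision
# that touches a standin.
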
def downloadlfiles(ui, repo):
    tonode = repo.changelog.node
    totalsuccess = 0
    totalmissing = 0
    for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
        success, missing = cachelfiles(ui, repo, tonode(rev))
        totalsuccess += len(success)
        totalmissing += len(missing)
    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing

def updatelfiles(
    ui, repo, filelist=None, printmessage=None, normallookup=False
):
    """Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    """
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        dropped = set()
        updated, removed = 0, 0
        wvfs = repo.wvfs
        wctx = repo[None]
        for lfile in lfiles:
            lfileorig = os.path.relpath(
                scmutil.backuppath(ui, repo, lfile), start=repo.root
            )
            standin = lfutil.standin(lfile)
            standinorig = os.path.relpath(
                scmutil.backuppath(ui, repo, standin), start=repo.root
            )
            if wvfs.exists(standin):
                if wvfs.exists(standinorig) and wvfs.exists(lfile):
                    shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
                    wvfs.unlinkpath(standinorig)
                expecthash = lfutil.readasstandin(wctx[standin])
                if expecthash != b'':
                    if lfile not in wctx:  # not switched to normal file
                        if repo.dirstate[standin] != b'?':
                            wvfs.unlinkpath(lfile, ignoremissing=True)
                        else:
                            dropped.add(lfile)

                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (
                    wvfs.exists(lfile)
                    and repo.dirstate.normalize(lfile) not in wctx
                ):
                    wvfs.unlinkpath(lfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            lfiles = [f for f in lfiles if f not in dropped]

            for f in dropped:
                repo.wvfs.unlinkpath(lfutil.standin(f))

                # This needs to happen for dropped files, otherwise they stay
                # in the M state.
                lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)

            statuswriter(_(b'getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the exec mode of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            standin = lfutil.standin(lfile)
            if wvfs.exists(standin):
                # exec is decided by the user's permissions using mask 0o100
                standinexec = wvfs.stat(standin).st_mode & 0o100
                st = wvfs.stat(lfile)
                mode = st.st_mode
                if standinexec != mode & 0o100:
                    # first remove all X bits, then shift all R bits to X
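                    # Illustration (added; not from the upstream source):
                    # with umask 0o022 this turns a 0o644 file into 0o755
                    # and a 0o600 file into 0o700 -- every readable class
                    # gains the execute bit unless the umask masks it out.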
                    mode &= ~0o111
                    if standinexec:
                        mode |= (mode >> 2) & 0o111 & ~util.umask
                    wvfs.chmod(lfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(
                _(b'%d largefiles updated, %d removed\n') % (updated, removed)
            )

@eh.command(
    b'lfpull',
    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
    + cmdutil.remoteopts,
    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
)
def lfpull(ui, repo, source=b"default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    repo.lfpullsource = source

    revs = opts.get('rev', [])
    if not revs:
        raise error.Abort(_(b'no revisions specified'))
    revs = scmutil.revrange(repo, revs)

    numcached = 0
    for rev in revs:
        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
        (cached, missing) = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_(b"%d largefiles cached\n") % numcached)

@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    hash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, hash)
    ui.write(b'%s\n' % hash)
    return 0
@@ -1,784 +1,781 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
-from mercurial.node import (
-    hex,
-    nullid,
-)
+from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------

@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue

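# Typical use (illustrative sketch, not from the upstream source):
#
#     with lfstatus(repo):
#         changes = repo.status()
#
# The previous value of the flag is restored even if status() raises.
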
def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize

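# Note (added): the explicit ``opt`` value wins over the [largefiles]
# minsize config entry above, which is consulted only when largefiles are
# assumed; the unit is megabytes in either case.
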
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)

def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)

def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)

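# Note (added): with the branches above, the cache resolves to e.g.
# %LOCALAPPDATA%\largefiles on Windows, ~/Library/Caches/largefiles on
# macOS, and ${XDG_CACHE_HOME:-~/.cache}/largefiles on other POSIX
# systems, unless [largefiles] usercache overrides it.
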
def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None

class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)

def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate

def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s

def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate[f] != b'?'
    ]

def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)

def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True

def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)

def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher

def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)

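# Example (added): standin(b'data/big.bin') == b'.hglf/data/big.bin'; on
# Windows the backslashes are normalized first, so standin(b'data\\big.bin')
# yields the same result.
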
def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

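# Example (added): splitstandin(b'.hglf/data/big.bin') == b'data/big.bin',
# while splitstandin(b'data/big.bin') returns None -- splitstandin() and
# standin() are inverses for paths under the standin directory.
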
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')

def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )

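# Note (added): getexecutable() is truthy only when the x bit is set for
# user, group *and* other, e.g. mode 0o755 counts as executable while
# 0o744 does not.
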
def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

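# Example (added): urljoin(b'http://server/repo', b'/info', b'refs') yields
# b'http://server/repo/info/refs' -- duplicate slashes at each seam are
# collapsed, while slashes elsewhere are left alone.
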
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))

class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins

545 | def synclfdirstate(repo, lfdirstate, lfile, normallookup): |
|
542 | def synclfdirstate(repo, lfdirstate, lfile, normallookup): | |
546 | lfstandin = standin(lfile) |
|
543 | lfstandin = standin(lfile) | |
547 | if lfstandin in repo.dirstate: |
|
544 | if lfstandin in repo.dirstate: | |
548 | stat = repo.dirstate._map[lfstandin] |
|
545 | stat = repo.dirstate._map[lfstandin] | |
549 | state, mtime = stat[0], stat[3] |
|
546 | state, mtime = stat[0], stat[3] | |
550 | else: |
|
547 | else: | |
551 | state, mtime = b'?', -1 |
|
548 | state, mtime = b'?', -1 | |
552 | if state == b'n': |
|
549 | if state == b'n': | |
553 | if normallookup or mtime < 0 or not repo.wvfs.exists(lfile): |
|
550 | if normallookup or mtime < 0 or not repo.wvfs.exists(lfile): | |
554 | # state 'n' doesn't ensure 'clean' in this case |
|
551 | # state 'n' doesn't ensure 'clean' in this case | |
555 | lfdirstate.normallookup(lfile) |
|
552 | lfdirstate.normallookup(lfile) | |
556 | else: |
|
553 | else: | |
557 | lfdirstate.normal(lfile) |
|
554 | lfdirstate.normal(lfile) | |
558 | elif state == b'm': |
|
555 | elif state == b'm': | |
559 | lfdirstate.normallookup(lfile) |
|
556 | lfdirstate.normallookup(lfile) | |
560 | elif state == b'r': |
|
557 | elif state == b'r': | |
561 | lfdirstate.remove(lfile) |
|
558 | lfdirstate.remove(lfile) | |
562 | elif state == b'a': |
|
559 | elif state == b'a': | |
563 | lfdirstate.add(lfile) |
|
560 | lfdirstate.add(lfile) | |
564 | elif state == b'?': |
|
561 | elif state == b'?': | |
565 | lfdirstate.drop(lfile) |
|
562 | lfdirstate.drop(lfile) | |
566 |
|
563 | |||
567 |
|
564 | |||
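The ``stat[0]``/``stat[3]`` unpacking in ``synclfdirstate`` relies on the classic dirstate-map entry layout, which (to the best of my understanding) is a ``(state, mode, size, mtime)`` tuple; a toy illustration with a plain dict standing in for ``repo.dirstate._map``::

    # illustrative stand-in for repo.dirstate._map; the entry layout is
    # assumed to be (state, mode, size, mtime)
    dirstate_map = {
        b'.hglf/big.bin': (b'n', 0o644, 1024, 1700000000),
    }

    stat = dirstate_map[b'.hglf/big.bin']
    state, mtime = stat[0], stat[3]
    assert (state, mtime) == (b'n', 1700000000)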
568 | def markcommitted(orig, ctx, node): |
|
565 | def markcommitted(orig, ctx, node): | |
569 | repo = ctx.repo() |
|
566 | repo = ctx.repo() | |
570 |
|
567 | |||
571 | orig(node) |
|
568 | orig(node) | |
572 |
|
569 | |||
573 | # ATTENTION: "ctx.files()" may differ from "repo[node].files()" |
|
570 | # ATTENTION: "ctx.files()" may differ from "repo[node].files()" | |
574 | # because files coming from the 2nd parent are omitted in the latter. |
|
571 | # because files coming from the 2nd parent are omitted in the latter. | |
575 | # |
|
572 | # | |
576 | # The former should be used to get targets of "synclfdirstate", |
|
573 | # The former should be used to get targets of "synclfdirstate", | |
577 | # because such files: |
|
574 | # because such files: | |
578 | # - are marked as "a" by "patch.patch()" (e.g. via transplant), and |
|
575 | # - are marked as "a" by "patch.patch()" (e.g. via transplant), and | |
579 | # - have to be marked as "n" after commit, but |
|
576 | # - have to be marked as "n" after commit, but | |
580 | # - aren't listed in "repo[node].files()" |
|
577 | # - aren't listed in "repo[node].files()" | |
581 |
|
578 | |||
582 | lfdirstate = openlfdirstate(repo.ui, repo) |
|
579 | lfdirstate = openlfdirstate(repo.ui, repo) | |
583 | for f in ctx.files(): |
|
580 | for f in ctx.files(): | |
584 | lfile = splitstandin(f) |
|
581 | lfile = splitstandin(f) | |
585 | if lfile is not None: |
|
582 | if lfile is not None: | |
586 | synclfdirstate(repo, lfdirstate, lfile, False) |
|
583 | synclfdirstate(repo, lfdirstate, lfile, False) | |
587 | lfdirstate.write() |
|
584 | lfdirstate.write() | |
588 |
|
585 | |||
589 | # As part of committing, copy all of the largefiles into the cache. |
|
586 | # As part of committing, copy all of the largefiles into the cache. | |
590 | # |
|
587 | # | |
591 | # Using "node" instead of "ctx" implies additional "repo[node]" |
|
588 | # Using "node" instead of "ctx" implies additional "repo[node]" | |
592 | # lookup while copyalltostore(), but can omit redundant check for |
|
589 | # lookup while copyalltostore(), but can omit redundant check for | |
593 | # files coming from the 2nd parent, which should exist in store |
|
590 | # files coming from the 2nd parent, which should exist in store | |
594 | # at merging. |
|
591 | # at merging. | |
595 | copyalltostore(repo, node) |
|
592 | copyalltostore(repo, node) | |
596 |
|
593 | |||
597 |
|
594 | |||
598 | def getlfilestoupdate(oldstandins, newstandins): |
|
595 | def getlfilestoupdate(oldstandins, newstandins): | |
599 | changedstandins = set(oldstandins).symmetric_difference(set(newstandins)) |
|
596 | changedstandins = set(oldstandins).symmetric_difference(set(newstandins)) | |
600 | filelist = [] |
|
597 | filelist = [] | |
601 | for f in changedstandins: |
|
598 | for f in changedstandins: | |
602 | if f[0] not in filelist: |
|
599 | if f[0] not in filelist: | |
603 | filelist.append(f[0]) |
|
600 | filelist.append(f[0]) | |
604 | return filelist |
|
601 | return filelist | |
605 |
|
602 | |||
606 |
|
603 | |||
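The ``symmetric_difference`` in ``getlfilestoupdate`` flags every largefile whose (name, hash) pair changed on either side; a quick self-contained illustration::

    old = [(b'big.bin', b'aaa'), (b'huge.iso', b'bbb')]
    new = [(b'big.bin', b'ccc'), (b'huge.iso', b'bbb')]

    changed = set(old).symmetric_difference(set(new))
    # big.bin shows up once per hash; huge.iso is untouched
    assert sorted(f for f, _ in changed) == [b'big.bin', b'big.bin']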
607 | def getlfilestoupload(repo, missing, addfunc): |
|
604 | def getlfilestoupload(repo, missing, addfunc): | |
608 | makeprogress = repo.ui.makeprogress |
|
605 | makeprogress = repo.ui.makeprogress | |
609 | with makeprogress( |
|
606 | with makeprogress( | |
610 | _(b'finding outgoing largefiles'), |
|
607 | _(b'finding outgoing largefiles'), | |
611 | unit=_(b'revisions'), |
|
608 | unit=_(b'revisions'), | |
612 | total=len(missing), |
|
609 | total=len(missing), | |
613 | ) as progress: |
|
610 | ) as progress: | |
614 | for i, n in enumerate(missing): |
|
611 | for i, n in enumerate(missing): | |
615 | progress.update(i) |
|
612 | progress.update(i) | |
616 | parents = [p for p in repo[n].parents() if p != nullid] |
|
613 | parents = [p for p in repo[n].parents() if p != repo.nullid] | |
617 |
|
614 | |||
618 | with lfstatus(repo, value=False): |
|
615 | with lfstatus(repo, value=False): | |
619 | ctx = repo[n] |
|
616 | ctx = repo[n] | |
620 |
|
617 | |||
621 | files = set(ctx.files()) |
|
618 | files = set(ctx.files()) | |
622 | if len(parents) == 2: |
|
619 | if len(parents) == 2: | |
623 | mc = ctx.manifest() |
|
620 | mc = ctx.manifest() | |
624 | mp1 = ctx.p1().manifest() |
|
621 | mp1 = ctx.p1().manifest() | |
625 | mp2 = ctx.p2().manifest() |
|
622 | mp2 = ctx.p2().manifest() | |
626 | for f in mp1: |
|
623 | for f in mp1: | |
627 | if f not in mc: |
|
624 | if f not in mc: | |
628 | files.add(f) |
|
625 | files.add(f) | |
629 | for f in mp2: |
|
626 | for f in mp2: | |
630 | if f not in mc: |
|
627 | if f not in mc: | |
631 | files.add(f) |
|
628 | files.add(f) | |
632 | for f in mc: |
|
629 | for f in mc: | |
633 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): |
|
630 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): | |
634 | files.add(f) |
|
631 | files.add(f) | |
635 | for fn in files: |
|
632 | for fn in files: | |
636 | if isstandin(fn) and fn in ctx: |
|
633 | if isstandin(fn) and fn in ctx: | |
637 | addfunc(fn, readasstandin(ctx[fn])) |
|
634 | addfunc(fn, readasstandin(ctx[fn])) | |
638 |
|
635 | |||
639 |
|
636 | |||
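For merge revisions, the body of ``getlfilestoupload`` rebuilds the affected-file set by comparing the merge manifest against both parents. The same comparison in miniature, with plain dicts standing in for manifest objects::

    mc = {b'a': b'n1', b'b': b'n2'}    # merge result
    mp1 = {b'a': b'n1', b'c': b'n3'}   # first parent
    mp2 = {b'b': b'n0', b'c': b'n3'}   # second parent

    files = set()
    for f in mp1:
        if f not in mc:
            files.add(f)               # dropped relative to p1
    for f in mp2:
        if f not in mc:
            files.add(f)               # dropped relative to p2
    for f in mc:
        if mc[f] != mp1.get(f) or mc[f] != mp2.get(f):
            files.add(f)               # differs from either parent
    assert files == {b'a', b'b', b'c'}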
640 | def updatestandinsbymatch(repo, match): |
|
637 | def updatestandinsbymatch(repo, match): | |
641 | """Update standins in the working directory according to specified match |
|
638 | """Update standins in the working directory according to specified match | |
642 |
|
639 | |||
643 | This returns (possibly modified) ``match`` object to be used for |
|
640 | This returns (possibly modified) ``match`` object to be used for | |
644 | subsequent commit process. |
|
641 | subsequent commit process. | |
645 | """ |
|
642 | """ | |
646 |
|
643 | |||
647 | ui = repo.ui |
|
644 | ui = repo.ui | |
648 |
|
645 | |||
649 | # Case 1: user calls commit with no specific files or |
|
646 | # Case 1: user calls commit with no specific files or | |
650 | # include/exclude patterns: refresh and commit all files that |
|
647 | # include/exclude patterns: refresh and commit all files that | |
651 | # are "dirty". |
|
648 | # are "dirty". | |
652 | if match is None or match.always(): |
|
649 | if match is None or match.always(): | |
653 | # Spend a bit of time here to get a list of files we know |
|
650 | # Spend a bit of time here to get a list of files we know | |
654 | # are modified so we can compare only against those. |
|
651 | # are modified so we can compare only against those. | |
655 | # It can cost a lot of time (several seconds) |
|
652 | # It can cost a lot of time (several seconds) | |
656 | # otherwise to update all standins if the largefiles are |
|
653 | # otherwise to update all standins if the largefiles are | |
657 | # large. |
|
654 | # large. | |
658 | lfdirstate = openlfdirstate(ui, repo) |
|
655 | lfdirstate = openlfdirstate(ui, repo) | |
659 | dirtymatch = matchmod.always() |
|
656 | dirtymatch = matchmod.always() | |
660 | unsure, s = lfdirstate.status( |
|
657 | unsure, s = lfdirstate.status( | |
661 | dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False |
|
658 | dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False | |
662 | ) |
|
659 | ) | |
663 | modifiedfiles = unsure + s.modified + s.added + s.removed |
|
660 | modifiedfiles = unsure + s.modified + s.added + s.removed | |
664 | lfiles = listlfiles(repo) |
|
661 | lfiles = listlfiles(repo) | |
665 | # this only loops through largefiles that exist (not |
|
662 | # this only loops through largefiles that exist (not | |
666 | # removed/renamed) |
|
663 | # removed/renamed) | |
667 | for lfile in lfiles: |
|
664 | for lfile in lfiles: | |
668 | if lfile in modifiedfiles: |
|
665 | if lfile in modifiedfiles: | |
669 | fstandin = standin(lfile) |
|
666 | fstandin = standin(lfile) | |
670 | if repo.wvfs.exists(fstandin): |
|
667 | if repo.wvfs.exists(fstandin): | |
671 | # this handles the case where a rebase is being |
|
668 | # this handles the case where a rebase is being | |
672 | # performed and the working copy is not updated |
|
669 | # performed and the working copy is not updated | |
673 | # yet. |
|
670 | # yet. | |
674 | if repo.wvfs.exists(lfile): |
|
671 | if repo.wvfs.exists(lfile): | |
675 | updatestandin(repo, lfile, fstandin) |
|
672 | updatestandin(repo, lfile, fstandin) | |
676 |
|
673 | |||
677 | return match |
|
674 | return match | |
678 |
|
675 | |||
679 | lfiles = listlfiles(repo) |
|
676 | lfiles = listlfiles(repo) | |
680 | match._files = repo._subdirlfs(match.files(), lfiles) |
|
677 | match._files = repo._subdirlfs(match.files(), lfiles) | |
681 |
|
678 | |||
682 | # Case 2: user calls commit with specified patterns: refresh |
|
679 | # Case 2: user calls commit with specified patterns: refresh | |
683 | # any matching big files. |
|
680 | # any matching big files. | |
684 | smatcher = composestandinmatcher(repo, match) |
|
681 | smatcher = composestandinmatcher(repo, match) | |
685 | standins = repo.dirstate.walk( |
|
682 | standins = repo.dirstate.walk( | |
686 | smatcher, subrepos=[], unknown=False, ignored=False |
|
683 | smatcher, subrepos=[], unknown=False, ignored=False | |
687 | ) |
|
684 | ) | |
688 |
|
685 | |||
689 | # No matching big files: get out of the way and pass control to |
|
686 | # No matching big files: get out of the way and pass control to | |
690 | # the usual commit() method. |
|
687 | # the usual commit() method. | |
691 | if not standins: |
|
688 | if not standins: | |
692 | return match |
|
689 | return match | |
693 |
|
690 | |||
694 | # Refresh all matching big files. It's possible that the |
|
691 | # Refresh all matching big files. It's possible that the | |
695 | # commit will end up failing, in which case the big files will |
|
692 | # commit will end up failing, in which case the big files will | |
696 | # stay refreshed. No harm done: the user modified them and |
|
693 | # stay refreshed. No harm done: the user modified them and | |
697 | # asked to commit them, so sooner or later we're going to |
|
694 | # asked to commit them, so sooner or later we're going to | |
698 | # refresh the standins. Might as well leave them refreshed. |
|
695 | # refresh the standins. Might as well leave them refreshed. | |
699 | lfdirstate = openlfdirstate(ui, repo) |
|
696 | lfdirstate = openlfdirstate(ui, repo) | |
700 | for fstandin in standins: |
|
697 | for fstandin in standins: | |
701 | lfile = splitstandin(fstandin) |
|
698 | lfile = splitstandin(fstandin) | |
702 | if lfdirstate[lfile] != b'r': |
|
699 | if lfdirstate[lfile] != b'r': | |
703 | updatestandin(repo, lfile, fstandin) |
|
700 | updatestandin(repo, lfile, fstandin) | |
704 |
|
701 | |||
705 | # Cook up a new matcher that only matches regular files or |
|
702 | # Cook up a new matcher that only matches regular files or | |
706 | # standins corresponding to the big files requested by the |
|
703 | # standins corresponding to the big files requested by the | |
707 | # user. Have to modify _files to prevent commit() from |
|
704 | # user. Have to modify _files to prevent commit() from | |
708 | # complaining "not tracked" for big files. |
|
705 | # complaining "not tracked" for big files. | |
709 | match = copy.copy(match) |
|
706 | match = copy.copy(match) | |
710 | origmatchfn = match.matchfn |
|
707 | origmatchfn = match.matchfn | |
711 |
|
708 | |||
712 | # Check both the list of largefiles and the list of |
|
709 | # Check both the list of largefiles and the list of | |
713 | # standins because if a largefile was removed, it |
|
710 | # standins because if a largefile was removed, it | |
714 | # won't be in the list of largefiles at this point |
|
711 | # won't be in the list of largefiles at this point | |
715 | match._files += sorted(standins) |
|
712 | match._files += sorted(standins) | |
716 |
|
713 | |||
717 | actualfiles = [] |
|
714 | actualfiles = [] | |
718 | for f in match._files: |
|
715 | for f in match._files: | |
719 | fstandin = standin(f) |
|
716 | fstandin = standin(f) | |
720 |
|
717 | |||
721 | # For largefiles, only one of the normal and standin should be |
|
718 | # For largefiles, only one of the normal and standin should be | |
722 | # committed (except if one of them is a remove). In the case of a |
|
719 | # committed (except if one of them is a remove). In the case of a | |
723 | # standin removal, drop the normal file if it is unknown to dirstate. |
|
720 | # standin removal, drop the normal file if it is unknown to dirstate. | |
724 | # Thus, skip plain largefile names but keep the standin. |
|
721 | # Thus, skip plain largefile names but keep the standin. | |
725 | if f in lfiles or fstandin in standins: |
|
722 | if f in lfiles or fstandin in standins: | |
726 | if repo.dirstate[fstandin] != b'r': |
|
723 | if repo.dirstate[fstandin] != b'r': | |
727 | if repo.dirstate[f] != b'r': |
|
724 | if repo.dirstate[f] != b'r': | |
728 | continue |
|
725 | continue | |
729 | elif repo.dirstate[f] == b'?': |
|
726 | elif repo.dirstate[f] == b'?': | |
730 | continue |
|
727 | continue | |
731 |
|
728 | |||
732 | actualfiles.append(f) |
|
729 | actualfiles.append(f) | |
733 | match._files = actualfiles |
|
730 | match._files = actualfiles | |
734 |
|
731 | |||
735 | def matchfn(f): |
|
732 | def matchfn(f): | |
736 | if origmatchfn(f): |
|
733 | if origmatchfn(f): | |
737 | return f not in lfiles |
|
734 | return f not in lfiles | |
738 | else: |
|
735 | else: | |
739 | return f in standins |
|
736 | return f in standins | |
740 |
|
737 | |||
741 | match.matchfn = matchfn |
|
738 | match.matchfn = matchfn | |
742 |
|
739 | |||
743 | return match |
|
740 | return match | |
744 |
|
741 | |||
745 |
|
742 | |||
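The matcher tweak at the end of ``updatestandinsbymatch`` is the subtle part: regular files keep their original match result unless they are largefiles, while standins match even if the original matcher would reject them. A stripped-down sketch of that composition, with sets and a toy matcher in place of the real objects::

    lfiles = {b'big.bin'}
    standins = {b'.hglf/big.bin'}

    def origmatchfn(f):
        # toy original matcher: rejects anything under .hglf/
        return not f.startswith(b'.hglf/')

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles     # hide plain largefile names
        return f in standins           # but surface their standins

    assert not matchfn(b'big.bin')     # the largefile itself is skipped
    assert matchfn(b'.hglf/big.bin')   # its standin is committed instead
    assert matchfn(b'README')          # normal files pass through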
746 | class automatedcommithook(object): |
|
743 | class automatedcommithook(object): | |
747 | """Stateful hook to update standins at the 1st commit of resuming |
|
744 | """Stateful hook to update standins at the 1st commit of resuming | |
748 |
|
745 | |||
749 | For efficiency, updating standins in the working directory should |
|
746 | For efficiency, updating standins in the working directory should | |
750 | be avoided while automated committing (like rebase, transplant and |
|
747 | be avoided while automated committing (like rebase, transplant and | |
751 | so on), because they should be updated before committing. |
|
748 | so on), because they should be updated before committing. | |
752 |
|
749 | |||
753 | But the 1st commit of resuming automated committing (e.g. ``rebase |
|
750 | But the 1st commit of resuming automated committing (e.g. ``rebase | |
754 | --continue``) should update them, because largefiles may be |
|
751 | --continue``) should update them, because largefiles may be | |
755 | modified manually. |
|
752 | modified manually. | |
756 | """ |
|
753 | """ | |
757 |
|
754 | |||
758 | def __init__(self, resuming): |
|
755 | def __init__(self, resuming): | |
759 | self.resuming = resuming |
|
756 | self.resuming = resuming | |
760 |
|
757 | |||
761 | def __call__(self, repo, match): |
|
758 | def __call__(self, repo, match): | |
762 | if self.resuming: |
|
759 | if self.resuming: | |
763 | self.resuming = False # avoids updating at subsequent commits |
|
760 | self.resuming = False # avoids updating at subsequent commits | |
764 | return updatestandinsbymatch(repo, match) |
|
761 | return updatestandinsbymatch(repo, match) | |
765 | else: |
|
762 | else: | |
766 | return match |
|
763 | return match | |
767 |
|
764 | |||
768 |
|
765 | |||
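A minimal sketch of the one-shot behaviour of ``automatedcommithook``: the first call performs the (potentially expensive) standin refresh, every later call is a plain passthrough (the class and arguments below are illustrative stand-ins, not Mercurial API)::

    class oneshothook(object):
        def __init__(self, resuming):
            self.resuming = resuming
            self.calls = []

        def __call__(self, repo, match):
            if self.resuming:
                self.resuming = False          # only refresh once
                self.calls.append('refresh')   # would updatestandinsbymatch()
            else:
                self.calls.append('passthrough')
            return match

    hook = oneshothook(resuming=True)
    hook(None, 'm')
    hook(None, 'm')
    assert hook.calls == ['refresh', 'passthrough']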
769 | def getstatuswriter(ui, repo, forcibly=None): |
|
766 | def getstatuswriter(ui, repo, forcibly=None): | |
770 | """Return the function to write largefiles specific status out |
|
767 | """Return the function to write largefiles specific status out | |
771 |
|
768 | |||
772 | If ``forcibly`` is ``None``, this returns the last element of |
|
769 | If ``forcibly`` is ``None``, this returns the last element of | |
773 | ``repo._lfstatuswriters`` as "default" writer function. |
|
770 | ``repo._lfstatuswriters`` as "default" writer function. | |
774 |
|
771 | |||
775 | Otherwise, this returns the function to always write out (or |
|
772 | Otherwise, this returns the function to always write out (or | |
776 | ignore if ``not forcibly``) status. |
|
773 | ignore if ``not forcibly``) status. | |
777 | """ |
|
774 | """ | |
778 | if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'): |
|
775 | if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'): | |
779 | return repo._lfstatuswriters[-1] |
|
776 | return repo._lfstatuswriters[-1] | |
780 | else: |
|
777 | else: | |
781 | if forcibly: |
|
778 | if forcibly: | |
782 | return ui.status # forcibly WRITE OUT |
|
779 | return ui.status # forcibly WRITE OUT | |
783 | else: |
|
780 | else: | |
784 | return lambda *msg, **opts: None # forcibly IGNORE |
|
781 | return lambda *msg, **opts: None # forcibly IGNORE |
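The ``forcibly`` tri-state in ``getstatuswriter`` selects between three writers; the same selection logic in isolation, with hypothetical stand-ins for the real ``ui``/``repo`` plumbing::

    def getwriter(forcibly, default_writer, status_writer):
        if forcibly is None:
            return default_writer              # last registered writer wins
        if forcibly:
            return status_writer               # forcibly WRITE OUT
        return lambda *msg, **opts: None       # forcibly IGNORE

    sink = []
    w = getwriter(True, None, lambda *msg, **opts: sink.append(msg))
    w(b'3 largefiles updated\n')
    assert sink == [(b'3 largefiles updated\n',)]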
@@ -1,550 +1,550 @@ | |||
1 | # wrapper.py - methods wrapping core mercurial logic |
|
1 | # wrapper.py - methods wrapping core mercurial logic | |
2 | # |
|
2 | # | |
3 | # Copyright 2017 Facebook, Inc. |
|
3 | # Copyright 2017 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import hashlib |
|
10 | import hashlib | |
11 |
|
11 | |||
12 | from mercurial.i18n import _ |
|
12 | from mercurial.i18n import _ | |
13 | from mercurial.node import bin, hex, nullid, short |
|
13 | from mercurial.node import bin, hex, short | |
14 | from mercurial.pycompat import ( |
|
14 | from mercurial.pycompat import ( | |
15 | getattr, |
|
15 | getattr, | |
16 | setattr, |
|
16 | setattr, | |
17 | ) |
|
17 | ) | |
18 |
|
18 | |||
19 | from mercurial import ( |
|
19 | from mercurial import ( | |
20 | bundle2, |
|
20 | bundle2, | |
21 | changegroup, |
|
21 | changegroup, | |
22 | cmdutil, |
|
22 | cmdutil, | |
23 | context, |
|
23 | context, | |
24 | error, |
|
24 | error, | |
25 | exchange, |
|
25 | exchange, | |
26 | exthelper, |
|
26 | exthelper, | |
27 | localrepo, |
|
27 | localrepo, | |
28 | pycompat, |
|
28 | pycompat, | |
29 | revlog, |
|
29 | revlog, | |
30 | scmutil, |
|
30 | scmutil, | |
31 | util, |
|
31 | util, | |
32 | vfs as vfsmod, |
|
32 | vfs as vfsmod, | |
33 | wireprotov1server, |
|
33 | wireprotov1server, | |
34 | ) |
|
34 | ) | |
35 |
|
35 | |||
36 | from mercurial.upgrade_utils import ( |
|
36 | from mercurial.upgrade_utils import ( | |
37 | actions as upgrade_actions, |
|
37 | actions as upgrade_actions, | |
38 | engine as upgrade_engine, |
|
38 | engine as upgrade_engine, | |
39 | ) |
|
39 | ) | |
40 |
|
40 | |||
41 | from mercurial.interfaces import repository |
|
41 | from mercurial.interfaces import repository | |
42 |
|
42 | |||
43 | from mercurial.utils import ( |
|
43 | from mercurial.utils import ( | |
44 | storageutil, |
|
44 | storageutil, | |
45 | stringutil, |
|
45 | stringutil, | |
46 | ) |
|
46 | ) | |
47 |
|
47 | |||
48 | from ..largefiles import lfutil |
|
48 | from ..largefiles import lfutil | |
49 |
|
49 | |||
50 | from . import ( |
|
50 | from . import ( | |
51 | blobstore, |
|
51 | blobstore, | |
52 | pointer, |
|
52 | pointer, | |
53 | ) |
|
53 | ) | |
54 |
|
54 | |||
55 | eh = exthelper.exthelper() |
|
55 | eh = exthelper.exthelper() | |
56 |
|
56 | |||
57 |
|
57 | |||
58 | @eh.wrapfunction(localrepo, b'makefilestorage') |
|
58 | @eh.wrapfunction(localrepo, b'makefilestorage') | |
59 | def localrepomakefilestorage(orig, requirements, features, **kwargs): |
|
59 | def localrepomakefilestorage(orig, requirements, features, **kwargs): | |
60 | if b'lfs' in requirements: |
|
60 | if b'lfs' in requirements: | |
61 | features.add(repository.REPO_FEATURE_LFS) |
|
61 | features.add(repository.REPO_FEATURE_LFS) | |
62 |
|
62 | |||
63 | return orig(requirements=requirements, features=features, **kwargs) |
|
63 | return orig(requirements=requirements, features=features, **kwargs) | |
64 |
|
64 | |||
65 |
|
65 | |||
66 | @eh.wrapfunction(changegroup, b'allsupportedversions') |
|
66 | @eh.wrapfunction(changegroup, b'allsupportedversions') | |
67 | def allsupportedversions(orig, ui): |
|
67 | def allsupportedversions(orig, ui): | |
68 | versions = orig(ui) |
|
68 | versions = orig(ui) | |
69 | versions.add(b'03') |
|
69 | versions.add(b'03') | |
70 | return versions |
|
70 | return versions | |
71 |
|
71 | |||
72 |
|
72 | |||
73 | @eh.wrapfunction(wireprotov1server, b'_capabilities') |
|
73 | @eh.wrapfunction(wireprotov1server, b'_capabilities') | |
74 | def _capabilities(orig, repo, proto): |
|
74 | def _capabilities(orig, repo, proto): | |
75 | '''Wrap server command to announce lfs server capability''' |
|
75 | '''Wrap server command to announce lfs server capability''' | |
76 | caps = orig(repo, proto) |
|
76 | caps = orig(repo, proto) | |
77 | if util.safehasattr(repo.svfs, b'lfslocalblobstore'): |
|
77 | if util.safehasattr(repo.svfs, b'lfslocalblobstore'): | |
78 | # Advertise a slightly different capability when lfs is *required*, so |
|
78 | # Advertise a slightly different capability when lfs is *required*, so | |
79 | # that the client knows it MUST load the extension. If lfs is not |
|
79 | # that the client knows it MUST load the extension. If lfs is not | |
80 | # required on the server, there's no reason to autoload the extension |
|
80 | # required on the server, there's no reason to autoload the extension | |
81 | # on the client. |
|
81 | # on the client. | |
82 | if b'lfs' in repo.requirements: |
|
82 | if b'lfs' in repo.requirements: | |
83 | caps.append(b'lfs-serve') |
|
83 | caps.append(b'lfs-serve') | |
84 |
|
84 | |||
85 | caps.append(b'lfs') |
|
85 | caps.append(b'lfs') | |
86 | return caps |
|
86 | return caps | |
87 |
|
87 | |||
88 |
|
88 | |||
89 | def bypasscheckhash(self, text): |
|
89 | def bypasscheckhash(self, text): | |
90 | return False |
|
90 | return False | |
91 |
|
91 | |||
92 |
|
92 | |||
93 | def readfromstore(self, text): |
|
93 | def readfromstore(self, text): | |
94 | """Read filelog content from local blobstore transform for flagprocessor. |
|
94 | """Read filelog content from local blobstore transform for flagprocessor. | |
95 |
|
95 | |||
96 | Default transform for flagprocessor, returning contents from blobstore. |
|
96 | Default transform for flagprocessor, returning contents from blobstore. | |
97 | Returns a 2-tuple (text, validatehash) where validatehash is True as the |
|
97 | Returns a 2-tuple (text, validatehash) where validatehash is True as the | |
98 | contents of the blobstore should be checked using checkhash. |
|
98 | contents of the blobstore should be checked using checkhash. | |
99 | """ |
|
99 | """ | |
100 | p = pointer.deserialize(text) |
|
100 | p = pointer.deserialize(text) | |
101 | oid = p.oid() |
|
101 | oid = p.oid() | |
102 | store = self.opener.lfslocalblobstore |
|
102 | store = self.opener.lfslocalblobstore | |
103 | if not store.has(oid): |
|
103 | if not store.has(oid): | |
104 | p.filename = self.filename |
|
104 | p.filename = self.filename | |
105 | self.opener.lfsremoteblobstore.readbatch([p], store) |
|
105 | self.opener.lfsremoteblobstore.readbatch([p], store) | |
106 |
|
106 | |||
107 | # The caller will validate the content |
|
107 | # The caller will validate the content | |
108 | text = store.read(oid, verify=False) |
|
108 | text = store.read(oid, verify=False) | |
109 |
|
109 | |||
110 | # pack hg filelog metadata |
|
110 | # pack hg filelog metadata | |
111 | hgmeta = {} |
|
111 | hgmeta = {} | |
112 | for k in p.keys(): |
|
112 | for k in p.keys(): | |
113 | if k.startswith(b'x-hg-'): |
|
113 | if k.startswith(b'x-hg-'): | |
114 | name = k[len(b'x-hg-') :] |
|
114 | name = k[len(b'x-hg-') :] | |
115 | hgmeta[name] = p[k] |
|
115 | hgmeta[name] = p[k] | |
116 | if hgmeta or text.startswith(b'\1\n'): |
|
116 | if hgmeta or text.startswith(b'\1\n'): | |
117 | text = storageutil.packmeta(hgmeta, text) |
|
117 | text = storageutil.packmeta(hgmeta, text) | |
118 |
|
118 | |||
119 | return (text, True) |
|
119 | return (text, True) | |
120 |
|
120 | |||
121 |
|
121 | |||
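The ``x-hg-`` loop in ``readfromstore`` just strips a namespace prefix off the pointer keys to recover hg filelog metadata; a self-contained illustration with a plain dict in place of a deserialized pointer::

    pointer = {
        b'oid': b'sha256:deadbeef',
        b'x-hg-copy': b'old/name',
        b'x-hg-copyrev': b'0' * 40,
    }

    hgmeta = {}
    for k in pointer:
        if k.startswith(b'x-hg-'):
            hgmeta[k[len(b'x-hg-'):]] = pointer[k]

    assert hgmeta == {b'copy': b'old/name', b'copyrev': b'0' * 40}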
122 | def writetostore(self, text): |
|
122 | def writetostore(self, text): | |
123 | # hg filelog metadata (includes rename, etc) |
|
123 | # hg filelog metadata (includes rename, etc) | |
124 | hgmeta, offset = storageutil.parsemeta(text) |
|
124 | hgmeta, offset = storageutil.parsemeta(text) | |
125 | if offset and offset > 0: |
|
125 | if offset and offset > 0: | |
126 | # lfs blob does not contain hg filelog metadata |
|
126 | # lfs blob does not contain hg filelog metadata | |
127 | text = text[offset:] |
|
127 | text = text[offset:] | |
128 |
|
128 | |||
129 | # git-lfs only supports sha256 |
|
129 | # git-lfs only supports sha256 | |
130 | oid = hex(hashlib.sha256(text).digest()) |
|
130 | oid = hex(hashlib.sha256(text).digest()) | |
131 | self.opener.lfslocalblobstore.write(oid, text) |
|
131 | self.opener.lfslocalblobstore.write(oid, text) | |
132 |
|
132 | |||
133 | # replace contents with metadata |
|
133 | # replace contents with metadata | |
134 | longoid = b'sha256:%s' % oid |
|
134 | longoid = b'sha256:%s' % oid | |
135 | metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text)) |
|
135 | metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text)) | |
136 |
|
136 | |||
137 | # by default, we expect the content to be binary. however, LFS could also |
|
137 | # by default, we expect the content to be binary. however, LFS could also | |
138 | # be used for non-binary content. add a special entry for non-binary data. |
|
138 | # be used for non-binary content. add a special entry for non-binary data. | |
139 | # this will be used by filectx.isbinary(). |
|
139 | # this will be used by filectx.isbinary(). | |
140 | if not stringutil.binary(text): |
|
140 | if not stringutil.binary(text): | |
141 | # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix |
|
141 | # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix | |
142 | metadata[b'x-is-binary'] = b'0' |
|
142 | metadata[b'x-is-binary'] = b'0' | |
143 |
|
143 | |||
144 | # translate hg filelog metadata to lfs metadata with "x-hg-" prefix |
|
144 | # translate hg filelog metadata to lfs metadata with "x-hg-" prefix | |
145 | if hgmeta is not None: |
|
145 | if hgmeta is not None: | |
146 | for k, v in pycompat.iteritems(hgmeta): |
|
146 | for k, v in pycompat.iteritems(hgmeta): | |
147 | metadata[b'x-hg-%s' % k] = v |
|
147 | metadata[b'x-hg-%s' % k] = v | |
148 |
|
148 | |||
149 | rawtext = metadata.serialize() |
|
149 | rawtext = metadata.serialize() | |
150 | return (rawtext, False) |
|
150 | return (rawtext, False) | |
151 |
|
151 | |||
152 |
|
152 | |||
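The oid computed in ``writetostore`` is simply the sha256 of the blob, and the metadata written back into the filelog follows the git-lfs pointer text format. A rough sketch of such a pointer (real pointers are produced by ``pointer.gitlfspointer``, so key order and extra keys may differ)::

    import hashlib

    def makepointer(text):
        # git-lfs pointer: version line, sha256 oid, payload size
        oid = hashlib.sha256(text).hexdigest().encode('ascii')
        return (b'version https://git-lfs.github.com/spec/v1\n'
                b'oid sha256:%s\n'
                b'size %d\n' % (oid, len(text)))

    p = makepointer(b'some large binary payload')
    assert b'oid sha256:' in p and b'size 25\n' in p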
153 | def _islfs(rlog, node=None, rev=None): |
|
153 | def _islfs(rlog, node=None, rev=None): | |
154 | if rev is None: |
|
154 | if rev is None: | |
155 | if node is None: |
|
155 | if node is None: | |
156 | # both None - likely working copy content where node is not ready |
|
156 | # both None - likely working copy content where node is not ready | |
157 | return False |
|
157 | return False | |
158 | rev = rlog.rev(node) |
|
158 | rev = rlog.rev(node) | |
159 | else: |
|
159 | else: | |
160 | node = rlog.node(rev) |
|
160 | node = rlog.node(rev) | |
161 | if node == nullid: |
|
161 | if node == rlog.nullid: | |
162 | return False |
|
162 | return False | |
163 | flags = rlog.flags(rev) |
|
163 | flags = rlog.flags(rev) | |
164 | return bool(flags & revlog.REVIDX_EXTSTORED) |
|
164 | return bool(flags & revlog.REVIDX_EXTSTORED) | |
165 |
|
165 | |||
166 |
|
166 | |||
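``_islfs`` ultimately tests a single bit in the revision flags. The same check in isolation (the flag value below mirrors what I believe ``revlog.REVIDX_EXTSTORED`` to be, but treat it as an assumption for illustration)::

    REVIDX_EXTSTORED = 1 << 13         # assumed value, illustration only

    def islfs(flags):
        # a revision is LFS-backed iff the external-storage bit is set
        return bool(flags & REVIDX_EXTSTORED)

    assert islfs(REVIDX_EXTSTORED | 0b1)
    assert not islfs(0)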
167 | # Wrapping may also be applied by remotefilelog |
|
167 | # Wrapping may also be applied by remotefilelog | |
168 | def filelogaddrevision( |
|
168 | def filelogaddrevision( | |
169 | orig, |
|
169 | orig, | |
170 | self, |
|
170 | self, | |
171 | text, |
|
171 | text, | |
172 | transaction, |
|
172 | transaction, | |
173 | link, |
|
173 | link, | |
174 | p1, |
|
174 | p1, | |
175 | p2, |
|
175 | p2, | |
176 | cachedelta=None, |
|
176 | cachedelta=None, | |
177 | node=None, |
|
177 | node=None, | |
178 | flags=revlog.REVIDX_DEFAULT_FLAGS, |
|
178 | flags=revlog.REVIDX_DEFAULT_FLAGS, | |
179 | **kwds |
|
179 | **kwds | |
180 | ): |
|
180 | ): | |
181 | # The matcher isn't available if reposetup() wasn't called. |
|
181 | # The matcher isn't available if reposetup() wasn't called. | |
182 | lfstrack = self._revlog.opener.options.get(b'lfstrack') |
|
182 | lfstrack = self._revlog.opener.options.get(b'lfstrack') | |
183 |
|
183 | |||
184 | if lfstrack: |
|
184 | if lfstrack: | |
185 | textlen = len(text) |
|
185 | textlen = len(text) | |
186 | # exclude hg rename meta from file size |
|
186 | # exclude hg rename meta from file size | |
187 | meta, offset = storageutil.parsemeta(text) |
|
187 | meta, offset = storageutil.parsemeta(text) | |
188 | if offset: |
|
188 | if offset: | |
189 | textlen -= offset |
|
189 | textlen -= offset | |
190 |
|
190 | |||
191 | if lfstrack(self._revlog.filename, textlen): |
|
191 | if lfstrack(self._revlog.filename, textlen): | |
192 | flags |= revlog.REVIDX_EXTSTORED |
|
192 | flags |= revlog.REVIDX_EXTSTORED | |
193 |
|
193 | |||
194 | return orig( |
|
194 | return orig( | |
195 | self, |
|
195 | self, | |
196 | text, |
|
196 | text, | |
197 | transaction, |
|
197 | transaction, | |
198 | link, |
|
198 | link, | |
199 | p1, |
|
199 | p1, | |
200 | p2, |
|
200 | p2, | |
201 | cachedelta=cachedelta, |
|
201 | cachedelta=cachedelta, | |
202 | node=node, |
|
202 | node=node, | |
203 | flags=flags, |
|
203 | flags=flags, | |
204 | **kwds |
|
204 | **kwds | |
205 | ) |
|
205 | ) | |
206 |
|
206 | |||
207 |
|
207 | |||
208 | # Wrapping may also be applied by remotefilelog |
|
208 | # Wrapping may also be applied by remotefilelog | |
209 | def filelogrenamed(orig, self, node): |
|
209 | def filelogrenamed(orig, self, node): | |
210 | if _islfs(self._revlog, node): |
|
210 | if _islfs(self._revlog, node): | |
211 | rawtext = self._revlog.rawdata(node) |
|
211 | rawtext = self._revlog.rawdata(node) | |
212 | if not rawtext: |
|
212 | if not rawtext: | |
213 | return False |
|
213 | return False | |
214 | metadata = pointer.deserialize(rawtext) |
|
214 | metadata = pointer.deserialize(rawtext) | |
215 | if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata: |
|
215 | if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata: | |
216 | return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev']) |
|
216 | return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev']) | |
217 | else: |
|
217 | else: | |
218 | return False |
|
218 | return False | |
219 | return orig(self, node) |
|
219 | return orig(self, node) | |
220 |
|
220 | |||
221 |
|
221 | |||
222 | # Wrapping may also be applied by remotefilelog |
|
222 | # Wrapping may also be applied by remotefilelog | |
223 | def filelogsize(orig, self, rev): |
|
223 | def filelogsize(orig, self, rev): | |
224 | if _islfs(self._revlog, rev=rev): |
|
224 | if _islfs(self._revlog, rev=rev): | |
225 | # fast path: use lfs metadata to answer size |
|
225 | # fast path: use lfs metadata to answer size | |
226 | rawtext = self._revlog.rawdata(rev) |
|
226 | rawtext = self._revlog.rawdata(rev) | |
227 | metadata = pointer.deserialize(rawtext) |
|
227 | metadata = pointer.deserialize(rawtext) | |
228 | return int(metadata[b'size']) |
|
228 | return int(metadata[b'size']) | |
229 | return orig(self, rev) |
|
229 | return orig(self, rev) | |
230 |
|
230 | |||
231 |
|
231 | |||
232 | @eh.wrapfunction(revlog, b'_verify_revision') |
|
232 | @eh.wrapfunction(revlog, b'_verify_revision') | |
233 | def _verify_revision(orig, rl, skipflags, state, node): |
|
233 | def _verify_revision(orig, rl, skipflags, state, node): | |
234 | if _islfs(rl, node=node): |
|
234 | if _islfs(rl, node=node): | |
235 | rawtext = rl.rawdata(node) |
|
235 | rawtext = rl.rawdata(node) | |
236 | metadata = pointer.deserialize(rawtext) |
|
236 | metadata = pointer.deserialize(rawtext) | |
237 |
|
237 | |||
238 | # Don't skip blobs that are stored locally, as local verification is |
|
238 | # Don't skip blobs that are stored locally, as local verification is | |
239 | # relatively cheap and there's no other way to verify the raw data in |
|
239 | # relatively cheap and there's no other way to verify the raw data in | |
240 | # the revlog. |
|
240 | # the revlog. | |
241 | if rl.opener.lfslocalblobstore.has(metadata.oid()): |
|
241 | if rl.opener.lfslocalblobstore.has(metadata.oid()): | |
242 | skipflags &= ~revlog.REVIDX_EXTSTORED |
|
242 | skipflags &= ~revlog.REVIDX_EXTSTORED | |
243 | elif skipflags & revlog.REVIDX_EXTSTORED: |
|
243 | elif skipflags & revlog.REVIDX_EXTSTORED: | |
244 | # The wrapped method will set `skipread`, but there's enough local |
|
244 | # The wrapped method will set `skipread`, but there's enough local | |
245 | # info to check renames. |
|
245 | # info to check renames. | |
246 | state[b'safe_renamed'].add(node) |
|
246 | state[b'safe_renamed'].add(node) | |
247 |
|
247 | |||
248 | orig(rl, skipflags, state, node) |
|
248 | orig(rl, skipflags, state, node) | |
249 |
|
249 | |||
250 |
|
250 | |||
251 | @eh.wrapfunction(context.basefilectx, b'cmp') |
|
251 | @eh.wrapfunction(context.basefilectx, b'cmp') | |
252 | def filectxcmp(orig, self, fctx): |
|
252 | def filectxcmp(orig, self, fctx): | |
253 | """returns True if text is different than fctx""" |
|
253 | """returns True if text is different than fctx""" | |
254 | # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs |
|
254 | # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs | |
255 | if self.islfs() and getattr(fctx, 'islfs', lambda: False)(): |
|
255 | if self.islfs() and getattr(fctx, 'islfs', lambda: False)(): | |
256 | # fast path: check LFS oid |
|
256 | # fast path: check LFS oid | |
257 | p1 = pointer.deserialize(self.rawdata()) |
|
257 | p1 = pointer.deserialize(self.rawdata()) | |
258 | p2 = pointer.deserialize(fctx.rawdata()) |
|
258 | p2 = pointer.deserialize(fctx.rawdata()) | |
259 | return p1.oid() != p2.oid() |
|
259 | return p1.oid() != p2.oid() | |
260 | return orig(self, fctx) |
|
260 | return orig(self, fctx) | |
261 |
|
261 | |||
262 |
|
262 | |||
263 | @eh.wrapfunction(context.basefilectx, b'isbinary') |
|
263 | @eh.wrapfunction(context.basefilectx, b'isbinary') | |
264 | def filectxisbinary(orig, self): |
|
264 | def filectxisbinary(orig, self): | |
265 | if self.islfs(): |
|
265 | if self.islfs(): | |
266 | # fast path: use lfs metadata to answer isbinary |
|
266 | # fast path: use lfs metadata to answer isbinary | |
267 | metadata = pointer.deserialize(self.rawdata()) |
|
267 | metadata = pointer.deserialize(self.rawdata()) | |
268 | # if lfs metadata says nothing, assume it's binary by default |
|
268 | # if lfs metadata says nothing, assume it's binary by default | |
269 | return bool(int(metadata.get(b'x-is-binary', 1))) |
|
269 | return bool(int(metadata.get(b'x-is-binary', 1))) | |
270 | return orig(self) |
|
270 | return orig(self) | |
271 |
|
271 | |||
272 |
|
272 | |||
273 | def filectxislfs(self): |
|
273 | def filectxislfs(self): | |
274 | return _islfs(self.filelog()._revlog, self.filenode()) |
|
274 | return _islfs(self.filelog()._revlog, self.filenode()) | |
275 |
|
275 | |||
276 |
|
276 | |||
277 | @eh.wrapfunction(cmdutil, b'_updatecatformatter') |
|
277 | @eh.wrapfunction(cmdutil, b'_updatecatformatter') | |
278 | def _updatecatformatter(orig, fm, ctx, matcher, path, decode): |
|
278 | def _updatecatformatter(orig, fm, ctx, matcher, path, decode): | |
279 | orig(fm, ctx, matcher, path, decode) |
|
279 | orig(fm, ctx, matcher, path, decode) | |
280 | fm.data(rawdata=ctx[path].rawdata()) |
|
280 | fm.data(rawdata=ctx[path].rawdata()) | |
281 |
|
281 | |||
282 |
|
282 | |||
283 | @eh.wrapfunction(scmutil, b'wrapconvertsink') |
|
283 | @eh.wrapfunction(scmutil, b'wrapconvertsink') | |
284 | def convertsink(orig, sink): |
|
284 | def convertsink(orig, sink): | |
285 | sink = orig(sink) |
|
285 | sink = orig(sink) | |
286 | if sink.repotype == b'hg': |
|
286 | if sink.repotype == b'hg': | |
287 |
|
287 | |||
288 | class lfssink(sink.__class__): |
|
288 | class lfssink(sink.__class__): | |
289 | def putcommit( |
|
289 | def putcommit( | |
290 | self, |
|
290 | self, | |
291 | files, |
|
291 | files, | |
292 | copies, |
|
292 | copies, | |
293 | parents, |
|
293 | parents, | |
294 | commit, |
|
294 | commit, | |
295 | source, |
|
295 | source, | |
296 | revmap, |
|
296 | revmap, | |
297 | full, |
|
297 | full, | |
298 | cleanp2, |
|
298 | cleanp2, | |
299 | ): |
|
299 | ): | |
300 | pc = super(lfssink, self).putcommit |
|
300 | pc = super(lfssink, self).putcommit | |
301 | node = pc( |
|
301 | node = pc( | |
302 | files, |
|
302 | files, | |
303 | copies, |
|
303 | copies, | |
304 | parents, |
|
304 | parents, | |
305 | commit, |
|
305 | commit, | |
306 | source, |
|
306 | source, | |
307 | revmap, |
|
307 | revmap, | |
308 | full, |
|
308 | full, | |
309 | cleanp2, |
|
309 | cleanp2, | |
310 | ) |
|
310 | ) | |
311 |
|
311 | |||
312 | if b'lfs' not in self.repo.requirements: |
|
312 | if b'lfs' not in self.repo.requirements: | |
313 | ctx = self.repo[node] |
|
313 | ctx = self.repo[node] | |
314 |
|
314 | |||
315 | # The file list may contain removed files, so check for |
|
315 | # The file list may contain removed files, so check for | |
316 | # membership before assuming it is in the context. |
|
316 | # membership before assuming it is in the context. | |
317 | if any(f in ctx and ctx[f].islfs() for f, n in files): |
|
317 | if any(f in ctx and ctx[f].islfs() for f, n in files): | |
318 | self.repo.requirements.add(b'lfs') |
|
318 | self.repo.requirements.add(b'lfs') | |
319 | scmutil.writereporequirements(self.repo) |
|
319 | scmutil.writereporequirements(self.repo) | |
320 |
|
320 | |||
321 | return node |
|
321 | return node | |
322 |
|
322 | |||
323 | sink.__class__ = lfssink |
|
323 | sink.__class__ = lfssink | |
324 |
|
324 | |||
325 | return sink |
|
325 | return sink | |
326 |
|
326 | |||
327 |
|
327 | |||
328 | # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs |
|
328 | # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs | |
329 | # options and blob stores are passed from othervfs to the new readonlyvfs. |
|
329 | # options and blob stores are passed from othervfs to the new readonlyvfs. | |
330 | @eh.wrapfunction(vfsmod.readonlyvfs, b'__init__') |
|
330 | @eh.wrapfunction(vfsmod.readonlyvfs, b'__init__') | |
331 | def vfsinit(orig, self, othervfs): |
|
331 | def vfsinit(orig, self, othervfs): | |
332 | orig(self, othervfs) |
|
332 | orig(self, othervfs) | |
333 | # copy lfs related options |
|
333 | # copy lfs related options | |
334 | for k, v in othervfs.options.items(): |
|
334 | for k, v in othervfs.options.items(): | |
335 | if k.startswith(b'lfs'): |
|
335 | if k.startswith(b'lfs'): | |
336 | self.options[k] = v |
|
336 | self.options[k] = v | |
337 | # also copy lfs blobstores. note: this can run before reposetup, so lfs |
|
337 | # also copy lfs blobstores. note: this can run before reposetup, so lfs | |
338 | # blobstore attributes are not always ready at this time. |
|
338 | # blobstore attributes are not always ready at this time. | |
339 | for name in [b'lfslocalblobstore', b'lfsremoteblobstore']: |
|
339 | for name in [b'lfslocalblobstore', b'lfsremoteblobstore']: | |
340 | if util.safehasattr(othervfs, name): |
|
340 | if util.safehasattr(othervfs, name): | |
341 | setattr(self, name, getattr(othervfs, name)) |
|
341 | setattr(self, name, getattr(othervfs, name)) | |
342 |
|
342 | |||
343 |
|
343 | |||
344 | def _prefetchfiles(repo, revmatches): |
|
344 | def _prefetchfiles(repo, revmatches): | |
345 | """Ensure that required LFS blobs are present, fetching them as a group if |
|
345 | """Ensure that required LFS blobs are present, fetching them as a group if | |
346 | needed.""" |
|
346 | needed.""" | |
347 | if not util.safehasattr(repo.svfs, b'lfslocalblobstore'): |
|
347 | if not util.safehasattr(repo.svfs, b'lfslocalblobstore'): | |
348 | return |
|
348 | return | |
349 |
|
349 | |||
350 | pointers = [] |
|
350 | pointers = [] | |
351 | oids = set() |
|
351 | oids = set() | |
352 | localstore = repo.svfs.lfslocalblobstore |
|
352 | localstore = repo.svfs.lfslocalblobstore | |
353 |
|
353 | |||
354 | for rev, match in revmatches: |
|
354 | for rev, match in revmatches: | |
355 | ctx = repo[rev] |
|
355 | ctx = repo[rev] | |
356 | for f in ctx.walk(match): |
|
356 | for f in ctx.walk(match): | |
357 | p = pointerfromctx(ctx, f) |
|
357 | p = pointerfromctx(ctx, f) | |
358 | if p and p.oid() not in oids and not localstore.has(p.oid()): |
|
358 | if p and p.oid() not in oids and not localstore.has(p.oid()): | |
359 | p.filename = f |
|
359 | p.filename = f | |
360 | pointers.append(p) |
|
360 | pointers.append(p) | |
361 | oids.add(p.oid()) |
|
361 | oids.add(p.oid()) | |
362 |
|
362 | |||
363 | if pointers: |
|
363 | if pointers: | |
364 | # Recalculating the repo store here allows 'paths.default' that is set |
|
364 | # Recalculating the repo store here allows 'paths.default' that is set | |
365 | # on the repo by a clone command to be used for the update. |
|
365 | # on the repo by a clone command to be used for the update. | |
366 | blobstore.remote(repo).readbatch(pointers, localstore) |
|
366 | blobstore.remote(repo).readbatch(pointers, localstore) | |
367 |
|
367 | |||
368 |
|
368 | |||
369 | def _canskipupload(repo): |
|
369 | def _canskipupload(repo): | |
370 | # Skip if this hasn't been passed to reposetup() |
|
370 | # Skip if this hasn't been passed to reposetup() | |
371 | if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): |
|
371 | if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): | |
372 | return True |
|
372 | return True | |
373 |
|
373 | |||
374 | # if remotestore is a null store, upload is a no-op and can be skipped |
|
374 | # if remotestore is a null store, upload is a no-op and can be skipped | |
375 | return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) |
|
375 | return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) | |
376 |
|
376 | |||
377 |
|
377 | |||
378 | def candownload(repo): |
|
378 | def candownload(repo): | |
379 | # Skip if this hasn't been passed to reposetup() |
|
379 | # Skip if this hasn't been passed to reposetup() | |
380 | if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): |
|
380 | if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): | |
381 | return False |
|
381 | return False | |
382 |
|
382 | |||
383 | # if remotestore is a null store, downloads will lead to nothing |
|
383 | # if remotestore is a null store, downloads will lead to nothing | |
384 | return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) |
|
384 | return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) | |
385 |
|
385 | |||
386 |
|
386 | |||
387 | def uploadblobsfromrevs(repo, revs): |
|
387 | def uploadblobsfromrevs(repo, revs): | |
388 | """upload lfs blobs introduced by revs |
|
388 | """upload lfs blobs introduced by revs | |
389 |
|
389 | |||
390 | Note: also used by other extensions, e.g. infinitepush; avoid renaming. |
|
390 | Note: also used by other extensions, e.g. infinitepush; avoid renaming. | |
391 | """ |
|
391 | """ | |
392 | if _canskipupload(repo): |
|
392 | if _canskipupload(repo): | |
393 | return |
|
393 | return | |
394 | pointers = extractpointers(repo, revs) |
|
394 | pointers = extractpointers(repo, revs) | |
395 | uploadblobs(repo, pointers) |
|
395 | uploadblobs(repo, pointers) | |
396 |
|
396 | |||
397 |
|
397 | |||
398 | def prepush(pushop): |
|
398 | def prepush(pushop): | |
399 | """Prepush hook. |
|
399 | """Prepush hook. | |
400 |
|
400 | |||
401 | Read through the revisions to push, looking for filelog entries that can be |
|
401 | Read through the revisions to push, looking for filelog entries that can be | |
402 | deserialized into metadata so that we can block the push on their upload to |
|
402 | deserialized into metadata so that we can block the push on their upload to | |
403 | the remote blobstore. |
|
403 | the remote blobstore. | |
404 | """ |
|
404 | """ | |
405 | return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) |
|
405 | return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) | |
406 |
|
406 | |||
407 |
|
407 | |||
408 | @eh.wrapfunction(exchange, b'push') |
|
408 | @eh.wrapfunction(exchange, b'push') | |
409 | def push(orig, repo, remote, *args, **kwargs): |
|
409 | def push(orig, repo, remote, *args, **kwargs): | |
410 | """bail on push if the extension isn't enabled on remote when needed, and |
|
410 | """bail on push if the extension isn't enabled on remote when needed, and | |
411 | update the remote store based on the destination path.""" |
|
411 | update the remote store based on the destination path.""" | |
412 | if b'lfs' in repo.requirements: |
|
412 | if b'lfs' in repo.requirements: | |
413 | # If the remote peer is for a local repo, the requirement tests in the |
|
413 | # If the remote peer is for a local repo, the requirement tests in the | |
414 | # base class method enforce lfs support. Otherwise, some revisions in |
|
414 | # base class method enforce lfs support. Otherwise, some revisions in | |
415 | # this repo use lfs, and the remote repo needs the extension loaded. |
|
415 | # this repo use lfs, and the remote repo needs the extension loaded. | |
416 | if not remote.local() and not remote.capable(b'lfs'): |
|
416 | if not remote.local() and not remote.capable(b'lfs'): | |
417 | # This is a copy of the message in exchange.push() when requirements |
|
417 | # This is a copy of the message in exchange.push() when requirements | |
418 | # are missing between local repos. |
|
418 | # are missing between local repos. | |
419 | m = _(b"required features are not supported in the destination: %s") |
|
419 | m = _(b"required features are not supported in the destination: %s") | |
420 | raise error.Abort( |
|
420 | raise error.Abort( | |
421 | m % b'lfs', hint=_(b'enable the lfs extension on the server') |
|
421 | m % b'lfs', hint=_(b'enable the lfs extension on the server') | |
422 | ) |
|
422 | ) | |
423 |
|
423 | |||
424 | # Repositories where this extension is disabled won't have the field. |
|
424 | # Repositories where this extension is disabled won't have the field. | |
425 | # But if there's a requirement, then the extension must be loaded AND |
|
425 | # But if there's a requirement, then the extension must be loaded AND | |
426 | # there may be blobs to push. |
|
426 | # there may be blobs to push. | |
427 | remotestore = repo.svfs.lfsremoteblobstore |
|
427 | remotestore = repo.svfs.lfsremoteblobstore | |
428 | try: |
|
428 | try: | |
429 | repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url()) |
|
429 | repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url()) | |
430 | return orig(repo, remote, *args, **kwargs) |
|
430 | return orig(repo, remote, *args, **kwargs) | |
431 | finally: |
|
431 | finally: | |
432 | repo.svfs.lfsremoteblobstore = remotestore |
|
432 | repo.svfs.lfsremoteblobstore = remotestore | |
433 | else: |
|
433 | else: | |
434 | return orig(repo, remote, *args, **kwargs) |
|
434 | return orig(repo, remote, *args, **kwargs) | |
435 |
|
435 | |||
436 |
|
436 | |||
437 | # when writing a bundle via "hg bundle" command, upload related LFS blobs |
|
437 | # when writing a bundle via "hg bundle" command, upload related LFS blobs | |
438 | @eh.wrapfunction(bundle2, b'writenewbundle') |
|
438 | @eh.wrapfunction(bundle2, b'writenewbundle') | |
439 | def writenewbundle( |
|
439 | def writenewbundle( | |
440 | orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs |
|
440 | orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs | |
441 | ): |
|
441 | ): | |
442 | """upload LFS blobs added by outgoing revisions on 'hg bundle'""" |
|
442 | """upload LFS blobs added by outgoing revisions on 'hg bundle'""" | |
443 | uploadblobsfromrevs(repo, outgoing.missing) |
|
443 | uploadblobsfromrevs(repo, outgoing.missing) | |
444 | return orig( |
|
444 | return orig( | |
445 | ui, repo, source, filename, bundletype, outgoing, *args, **kwargs |
|
445 | ui, repo, source, filename, bundletype, outgoing, *args, **kwargs | |
446 | ) |
|
446 | ) | |
447 |
|
447 | |||
448 |
|
448 | |||
449 | def extractpointers(repo, revs): |
|
449 | def extractpointers(repo, revs): | |
450 | """return a list of lfs pointers added by given revs""" |
|
450 | """return a list of lfs pointers added by given revs""" | |
451 | repo.ui.debug(b'lfs: computing set of blobs to upload\n') |
|
451 | repo.ui.debug(b'lfs: computing set of blobs to upload\n') | |
452 | pointers = {} |
|
452 | pointers = {} | |
453 |
|
453 | |||
454 | makeprogress = repo.ui.makeprogress |
|
454 | makeprogress = repo.ui.makeprogress | |
455 | with makeprogress( |
|
455 | with makeprogress( | |
456 | _(b'lfs search'), _(b'changesets'), len(revs) |
|
456 | _(b'lfs search'), _(b'changesets'), len(revs) | |
457 | ) as progress: |
|
457 | ) as progress: | |
458 | for r in revs: |
|
458 | for r in revs: | |
459 | ctx = repo[r] |
|
459 | ctx = repo[r] | |
460 | for p in pointersfromctx(ctx).values(): |
|
460 | for p in pointersfromctx(ctx).values(): | |
461 | pointers[p.oid()] = p |
|
461 | pointers[p.oid()] = p | |
462 | progress.increment() |
|
462 | progress.increment() | |
463 | return sorted(pointers.values(), key=lambda p: p.oid()) |
|
463 | return sorted(pointers.values(), key=lambda p: p.oid()) | |
464 |
|
464 | |||
465 |
|
465 | |||
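Keying the dict by ``p.oid()`` in ``extractpointers`` deduplicates blobs that many revisions or paths point at; the pattern in miniature, with a minimal stand-in pointer class::

    class fakepointer(object):
        def __init__(self, oid):
            self._oid = oid

        def oid(self):
            return self._oid

    pointers = {}
    for p in [fakepointer(b'aa'), fakepointer(b'bb'), fakepointer(b'aa')]:
        pointers[p.oid()] = p          # one entry per distinct blob
    assert sorted(pointers) == [b'aa', b'bb']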
466 | def pointerfromctx(ctx, f, removed=False): |
|
466 | def pointerfromctx(ctx, f, removed=False): | |
467 | """return a pointer for the named file from the given changectx, or None if |
|
467 | """return a pointer for the named file from the given changectx, or None if | |
468 | the file isn't LFS. |
|
468 | the file isn't LFS. | |
469 |
|
469 | |||
470 | Optionally, the pointer for a file deleted from the context can be returned. |
|
470 | Optionally, the pointer for a file deleted from the context can be returned. | |
471 | Since no such pointer is actually stored, and to distinguish from a non LFS |
|
471 | Since no such pointer is actually stored, and to distinguish from a non LFS | |
472 | file, this pointer is represented by an empty dict. |
|
472 | file, this pointer is represented by an empty dict. | |
473 | """ |
|
473 | """ | |
474 | _ctx = ctx |
|
474 | _ctx = ctx | |
475 | if f not in ctx: |
|
475 | if f not in ctx: | |
476 | if not removed: |
|
476 | if not removed: | |
477 | return None |
|
477 | return None | |
478 | if f in ctx.p1(): |
|
478 | if f in ctx.p1(): | |
479 | _ctx = ctx.p1() |
|
479 | _ctx = ctx.p1() | |
480 | elif f in ctx.p2(): |
|
480 | elif f in ctx.p2(): | |
481 | _ctx = ctx.p2() |
|
481 | _ctx = ctx.p2() | |
482 | else: |
|
482 | else: | |
483 | return None |
|
483 | return None | |
484 | fctx = _ctx[f] |
|
484 | fctx = _ctx[f] | |
485 | if not _islfs(fctx.filelog()._revlog, fctx.filenode()): |
|
485 | if not _islfs(fctx.filelog()._revlog, fctx.filenode()): | |
486 | return None |
|
486 | return None | |
487 | try: |
|
487 | try: | |
488 | p = pointer.deserialize(fctx.rawdata()) |
|
488 | p = pointer.deserialize(fctx.rawdata()) | |
489 | if ctx == _ctx: |
|
489 | if ctx == _ctx: | |
490 | return p |
|
490 | return p | |
491 | return {} |
|
491 | return {} | |
492 | except pointer.InvalidPointer as ex: |
|
492 | except pointer.InvalidPointer as ex: | |
493 | raise error.Abort( |
|
493 | raise error.Abort( | |
494 | _(b'lfs: corrupted pointer (%s@%s): %s\n') |
|
494 | _(b'lfs: corrupted pointer (%s@%s): %s\n') | |
495 | % (f, short(_ctx.node()), ex) |
|
495 | % (f, short(_ctx.node()), ex) | |
496 | ) |
|
496 | ) | |
497 |
|
497 | |||
498 |
|
498 | |||
499 | def pointersfromctx(ctx, removed=False): |
|
499 | def pointersfromctx(ctx, removed=False): | |
500 | """return a dict {path: pointer} for given single changectx. |
|
500 | """return a dict {path: pointer} for given single changectx. | |
501 |
|
501 | |||
502 | If ``removed`` == True and the LFS file was removed from ``ctx``, the value |
|
502 | If ``removed`` == True and the LFS file was removed from ``ctx``, the value | |
503 | stored for the path is an empty dict. |
|
503 | stored for the path is an empty dict. | |
504 | """ |
|
504 | """ | |
505 | result = {} |
|
505 | result = {} | |
506 | m = ctx.repo().narrowmatch() |
|
506 | m = ctx.repo().narrowmatch() | |
507 |
|
507 | |||
508 | # TODO: consider manifest.fastread() instead |
|
508 | # TODO: consider manifest.fastread() instead | |
509 | for f in ctx.files(): |
|
509 | for f in ctx.files(): | |
510 | if not m(f): |
|
510 | if not m(f): | |
511 | continue |
|
511 | continue | |
512 | p = pointerfromctx(ctx, f, removed=removed) |
|
512 | p = pointerfromctx(ctx, f, removed=removed) | |
513 | if p is not None: |
|
513 | if p is not None: | |
514 | result[f] = p |
|
514 | result[f] = p | |
515 | return result |
|
515 | return result | |
516 |
|
516 | |||
517 |
|
517 | |||
518 | def uploadblobs(repo, pointers): |
|
518 | def uploadblobs(repo, pointers): | |
519 | """upload given pointers from local blobstore""" |
|
519 | """upload given pointers from local blobstore""" | |
520 | if not pointers: |
|
520 | if not pointers: | |
521 | return |
|
521 | return | |
522 |
|
522 | |||
523 | remoteblob = repo.svfs.lfsremoteblobstore |
|
523 | remoteblob = repo.svfs.lfsremoteblobstore | |
524 | remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) |
|
524 | remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) | |
525 |
|
525 | |||
526 |
|
526 | |||
527 | @eh.wrapfunction(upgrade_engine, b'finishdatamigration') |
|
527 | @eh.wrapfunction(upgrade_engine, b'finishdatamigration') | |
528 | def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements): |
|
528 | def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements): | |
529 | orig(ui, srcrepo, dstrepo, requirements) |
|
529 | orig(ui, srcrepo, dstrepo, requirements) | |
530 |
|
530 | |||
531 | # Skip if this hasn't been passed to reposetup() |
|
531 | # Skip if this hasn't been passed to reposetup() | |
532 | if util.safehasattr( |
|
532 | if util.safehasattr( | |
533 | srcrepo.svfs, b'lfslocalblobstore' |
|
533 | srcrepo.svfs, b'lfslocalblobstore' | |
534 | ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'): |
|
534 | ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'): | |
535 | srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs |
|
535 | srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs | |
536 | dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs |
|
536 | dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs | |
537 |
|
537 | |||
538 | for dirpath, dirs, files in srclfsvfs.walk(): |
|
538 | for dirpath, dirs, files in srclfsvfs.walk(): | |
539 | for oid in files: |
|
539 | for oid in files: | |
540 | ui.write(_(b'copying lfs blob %s\n') % oid) |
|
540 | ui.write(_(b'copying lfs blob %s\n') % oid) | |
541 | lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid)) |
|
541 | lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid)) | |
542 |
|
542 | |||
543 |
|
543 | |||
544 | @eh.wrapfunction(upgrade_actions, b'preservedrequirements') |
|
544 | @eh.wrapfunction(upgrade_actions, b'preservedrequirements') | |
545 | @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements') |
|
545 | @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements') | |
546 | def upgraderequirements(orig, repo): |
|
546 | def upgraderequirements(orig, repo): | |
547 | reqs = orig(repo) |
|
547 | reqs = orig(repo) | |
548 | if b'lfs' in repo.requirements: |
|
548 | if b'lfs' in repo.requirements: | |
549 | reqs.add(b'lfs') |
|
549 | reqs.add(b'lfs') | |
550 | return reqs |
|
550 | return reqs |
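Both wrappers above follow the same shape: call the original function, then
extend its result when the repository carries the lfs requirement. A minimal
self-contained sketch of that shape (plain Python; ``FakeRepo`` and
``wrapped`` are illustrative stand-ins, not Mercurial's extension API)::

    class FakeRepo(object):
        # stand-in for a repository that already uses lfs
        requirements = {b'lfs', b'revlogv1'}

    def orig(repo):
        # the wrapped function: returns the base requirement set
        return {b'revlogv1'}

    def wrapped(repo):
        reqs = orig(repo)                # call the original first
        if b'lfs' in repo.requirements:
            reqs.add(b'lfs')             # then preserve lfs on upgrade
        return reqs

    assert wrapped(FakeRepo()) == {b'revlogv1', b'lfs'}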
@@ -1,4315 +1,4314 b'' | |||||
1 | # mq.py - patch queues for mercurial |
|
1 | # mq.py - patch queues for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | '''manage a stack of patches |
|
8 | '''manage a stack of patches | |
9 |
|
9 | |||
10 | This extension lets you work with a stack of patches in a Mercurial |
|
10 | This extension lets you work with a stack of patches in a Mercurial | |
11 | repository. It manages two stacks of patches - all known patches, and |
|
11 | repository. It manages two stacks of patches - all known patches, and | |
12 | applied patches (subset of known patches). |
|
12 | applied patches (subset of known patches). | |
13 |
|
13 | |||
14 | Known patches are represented as patch files in the .hg/patches |
|
14 | Known patches are represented as patch files in the .hg/patches | |
15 | directory. Applied patches are both patch files and changesets. |
|
15 | directory. Applied patches are both patch files and changesets. | |
16 |
|
16 | |||
17 | Common tasks (use :hg:`help COMMAND` for more details):: |
|
17 | Common tasks (use :hg:`help COMMAND` for more details):: | |
18 |
|
18 | |||
19 | create new patch qnew |
|
19 | create new patch qnew | |
20 | import existing patch qimport |
|
20 | import existing patch qimport | |
21 |
|
21 | |||
22 | print patch series qseries |
|
22 | print patch series qseries | |
23 | print applied patches qapplied |
|
23 | print applied patches qapplied | |
24 |
|
24 | |||
25 | add known patch to applied stack qpush |
|
25 | add known patch to applied stack qpush | |
26 | remove patch from applied stack qpop |
|
26 | remove patch from applied stack qpop | |
27 | refresh contents of top applied patch qrefresh |
|
27 | refresh contents of top applied patch qrefresh | |
28 |
|
28 | |||
29 | By default, mq will automatically use git patches when required to |
|
29 | By default, mq will automatically use git patches when required to | |
30 | avoid losing file mode changes, copy records, binary files or empty |
|
30 | avoid losing file mode changes, copy records, binary files or empty | |
31 | file creations or deletions. This behavior can be configured with:: |

31 | file creations or deletions. This behavior can be configured with:: | |
32 |
|
32 | |||
33 | [mq] |
|
33 | [mq] | |
34 | git = auto/keep/yes/no |
|
34 | git = auto/keep/yes/no | |
35 |
|
35 | |||
36 | If set to 'keep', mq will obey the [diff] section configuration while |
|
36 | If set to 'keep', mq will obey the [diff] section configuration while | |
37 | preserving existing git patches upon qrefresh. If set to 'yes' or |
|
37 | preserving existing git patches upon qrefresh. If set to 'yes' or | |
38 | 'no', mq will override the [diff] section and always generate git or |
|
38 | 'no', mq will override the [diff] section and always generate git or | |
39 | regular patches, possibly losing data in the second case. |
|
39 | regular patches, possibly losing data in the second case. | |
40 |
|
40 | |||
41 | It may be desirable for mq changesets to be kept in the secret phase (see |
|
41 | It may be desirable for mq changesets to be kept in the secret phase (see | |
42 | :hg:`help phases`), which can be enabled with the following setting:: |
|
42 | :hg:`help phases`), which can be enabled with the following setting:: | |
43 |
|
43 | |||
44 | [mq] |
|
44 | [mq] | |
45 | secret = True |
|
45 | secret = True | |
46 |
|
46 | |||
47 | You will by default be managing a patch queue named "patches". You can |
|
47 | You will by default be managing a patch queue named "patches". You can | |
48 | create other, independent patch queues with the :hg:`qqueue` command. |
|
48 | create other, independent patch queues with the :hg:`qqueue` command. | |
49 |
|
49 | |||
50 | If the working directory contains uncommitted files, qpush, qpop and |
|
50 | If the working directory contains uncommitted files, qpush, qpop and | |
51 | qgoto abort immediately. If -f/--force is used, the changes are |
|
51 | qgoto abort immediately. If -f/--force is used, the changes are | |
52 | discarded. Setting:: |
|
52 | discarded. Setting:: | |
53 |
|
53 | |||
54 | [mq] |
|
54 | [mq] | |
55 | keepchanges = True |
|
55 | keepchanges = True | |
56 |
|
56 | |||
57 | make them behave as if --keep-changes were passed, and non-conflicting |
|
57 | make them behave as if --keep-changes were passed, and non-conflicting | |
58 | local changes will be tolerated and preserved. If incompatible options |
|
58 | local changes will be tolerated and preserved. If incompatible options | |
59 | such as -f/--force or --exact are passed, this setting is ignored. |
|
59 | such as -f/--force or --exact are passed, this setting is ignored. | |
60 |
|
60 | |||
61 | This extension used to provide a strip command. This command now lives |
|
61 | This extension used to provide a strip command. This command now lives | |
62 | in the strip extension. |
|
62 | in the strip extension. | |
63 | ''' |
|
63 | ''' | |
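The ``[mq]`` settings described above are ordinary hgrc entries. A
self-contained approximation of their syntax using the stdlib configparser
(Mercurial ships its own config machinery, so this is illustrative only)::

    import configparser
    import textwrap

    HGRC = textwrap.dedent("""
        [mq]
        git = auto
        secret = True
        keepchanges = True
    """)

    cfg = configparser.ConfigParser()
    cfg.read_string(HGRC)
    assert cfg.get('mq', 'git') == 'auto'
    assert cfg.getboolean('mq', 'secret')
    assert cfg.getboolean('mq', 'keepchanges')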
64 |
|
64 | |||
65 | from __future__ import absolute_import, print_function |
|
65 | from __future__ import absolute_import, print_function | |
66 |
|
66 | |||
67 | import errno |
|
67 | import errno | |
68 | import os |
|
68 | import os | |
69 | import re |
|
69 | import re | |
70 | import shutil |
|
70 | import shutil | |
71 | import sys |
|
71 | import sys | |
72 | from mercurial.i18n import _ |
|
72 | from mercurial.i18n import _ | |
73 | from mercurial.node import ( |
|
73 | from mercurial.node import ( | |
74 | bin, |
|
74 | bin, | |
75 | hex, |
|
75 | hex, | |
76 | nullid, |
|
|||
77 | nullrev, |
|
76 | nullrev, | |
78 | short, |
|
77 | short, | |
79 | ) |
|
78 | ) | |
80 | from mercurial.pycompat import ( |
|
79 | from mercurial.pycompat import ( | |
81 | delattr, |
|
80 | delattr, | |
82 | getattr, |
|
81 | getattr, | |
83 | open, |
|
82 | open, | |
84 | ) |
|
83 | ) | |
85 | from mercurial import ( |
|
84 | from mercurial import ( | |
86 | cmdutil, |
|
85 | cmdutil, | |
87 | commands, |
|
86 | commands, | |
88 | dirstateguard, |
|
87 | dirstateguard, | |
89 | encoding, |
|
88 | encoding, | |
90 | error, |
|
89 | error, | |
91 | extensions, |
|
90 | extensions, | |
92 | hg, |
|
91 | hg, | |
93 | localrepo, |
|
92 | localrepo, | |
94 | lock as lockmod, |
|
93 | lock as lockmod, | |
95 | logcmdutil, |
|
94 | logcmdutil, | |
96 | patch as patchmod, |
|
95 | patch as patchmod, | |
97 | phases, |
|
96 | phases, | |
98 | pycompat, |
|
97 | pycompat, | |
99 | registrar, |
|
98 | registrar, | |
100 | revsetlang, |
|
99 | revsetlang, | |
101 | scmutil, |
|
100 | scmutil, | |
102 | smartset, |
|
101 | smartset, | |
103 | strip, |
|
102 | strip, | |
104 | subrepoutil, |
|
103 | subrepoutil, | |
105 | util, |
|
104 | util, | |
106 | vfs as vfsmod, |
|
105 | vfs as vfsmod, | |
107 | ) |
|
106 | ) | |
108 | from mercurial.utils import ( |
|
107 | from mercurial.utils import ( | |
109 | dateutil, |
|
108 | dateutil, | |
110 | stringutil, |
|
109 | stringutil, | |
111 | urlutil, |
|
110 | urlutil, | |
112 | ) |
|
111 | ) | |
113 |
|
112 | |||
114 | release = lockmod.release |
|
113 | release = lockmod.release | |
115 | seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))] |
|
114 | seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))] | |
116 |
|
115 | |||
117 | cmdtable = {} |
|
116 | cmdtable = {} | |
118 | command = registrar.command(cmdtable) |
|
117 | command = registrar.command(cmdtable) | |
119 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
118 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
120 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
119 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
121 | # be specifying the version(s) of Mercurial they are tested with, or |
|
120 | # be specifying the version(s) of Mercurial they are tested with, or | |
122 | # leave the attribute unspecified. |
|
121 | # leave the attribute unspecified. | |
123 | testedwith = b'ships-with-hg-core' |
|
122 | testedwith = b'ships-with-hg-core' | |
124 |
|
123 | |||
125 | configtable = {} |
|
124 | configtable = {} | |
126 | configitem = registrar.configitem(configtable) |
|
125 | configitem = registrar.configitem(configtable) | |
127 |
|
126 | |||
128 | configitem( |
|
127 | configitem( | |
129 | b'mq', |
|
128 | b'mq', | |
130 | b'git', |
|
129 | b'git', | |
131 | default=b'auto', |
|
130 | default=b'auto', | |
132 | ) |
|
131 | ) | |
133 | configitem( |
|
132 | configitem( | |
134 | b'mq', |
|
133 | b'mq', | |
135 | b'keepchanges', |
|
134 | b'keepchanges', | |
136 | default=False, |
|
135 | default=False, | |
137 | ) |
|
136 | ) | |
138 | configitem( |
|
137 | configitem( | |
139 | b'mq', |
|
138 | b'mq', | |
140 | b'plain', |
|
139 | b'plain', | |
141 | default=False, |
|
140 | default=False, | |
142 | ) |
|
141 | ) | |
143 | configitem( |
|
142 | configitem( | |
144 | b'mq', |
|
143 | b'mq', | |
145 | b'secret', |
|
144 | b'secret', | |
146 | default=False, |
|
145 | default=False, | |
147 | ) |
|
146 | ) | |
148 |
|
147 | |||
149 | # force load strip extension formerly included in mq and import some utility |
|
148 | # force load strip extension formerly included in mq and import some utility | |
150 | try: |
|
149 | try: | |
151 | extensions.find(b'strip') |
|
150 | extensions.find(b'strip') | |
152 | except KeyError: |
|
151 | except KeyError: | |
153 | # note: load is lazy so we could avoid the try-except, |
|
152 | # note: load is lazy so we could avoid the try-except, | |
154 | # but I (marmoute) prefer this explicit code. |
|
153 | # but I (marmoute) prefer this explicit code. | |
155 | class dummyui(object): |
|
154 | class dummyui(object): | |
156 | def debug(self, msg): |
|
155 | def debug(self, msg): | |
157 | pass |
|
156 | pass | |
158 |
|
157 | |||
159 | def log(self, event, msgfmt, *msgargs, **opts): |
|
158 | def log(self, event, msgfmt, *msgargs, **opts): | |
160 | pass |
|
159 | pass | |
161 |
|
160 | |||
162 | extensions.load(dummyui(), b'strip', b'') |
|
161 | extensions.load(dummyui(), b'strip', b'') | |
163 |
|
162 | |||
164 | strip = strip.strip |
|
163 | strip = strip.strip | |
165 |
|
164 | |||
166 |
|
165 | |||
167 | def checksubstate(repo, baserev=None): |
|
166 | def checksubstate(repo, baserev=None): | |
168 | """return list of subrepos at a different revision than substate. |
|
167 | """return list of subrepos at a different revision than substate. | |
169 | Abort if any subrepos have uncommitted changes.""" |
|
168 | Abort if any subrepos have uncommitted changes.""" | |
170 | inclsubs = [] |
|
169 | inclsubs = [] | |
171 | wctx = repo[None] |
|
170 | wctx = repo[None] | |
172 | if baserev: |
|
171 | if baserev: | |
173 | bctx = repo[baserev] |
|
172 | bctx = repo[baserev] | |
174 | else: |
|
173 | else: | |
175 | bctx = wctx.p1() |
|
174 | bctx = wctx.p1() | |
176 | for s in sorted(wctx.substate): |
|
175 | for s in sorted(wctx.substate): | |
177 | wctx.sub(s).bailifchanged(True) |
|
176 | wctx.sub(s).bailifchanged(True) | |
178 | if s not in bctx.substate or bctx.sub(s).dirty(): |
|
177 | if s not in bctx.substate or bctx.sub(s).dirty(): | |
179 | inclsubs.append(s) |
|
178 | inclsubs.append(s) | |
180 | return inclsubs |
|
179 | return inclsubs | |
181 |
|
180 | |||
182 |
|
181 | |||
183 | # Patch names look like unix file names. |

182 | # Patch names look like unix file names. | |
184 | # They must be joinable with the queue directory and result in the patch path. |

183 | # They must be joinable with the queue directory and result in the patch path. | |
185 | normname = util.normpath |
|
184 | normname = util.normpath | |
186 |
|
185 | |||
187 |
|
186 | |||
188 | class statusentry(object): |
|
187 | class statusentry(object): | |
189 | def __init__(self, node, name): |
|
188 | def __init__(self, node, name): | |
190 | self.node, self.name = node, name |
|
189 | self.node, self.name = node, name | |
191 |
|
190 | |||
192 | def __bytes__(self): |
|
191 | def __bytes__(self): | |
193 | return hex(self.node) + b':' + self.name |
|
192 | return hex(self.node) + b':' + self.name | |
194 |
|
193 | |||
195 | __str__ = encoding.strmethod(__bytes__) |
|
194 | __str__ = encoding.strmethod(__bytes__) | |
196 | __repr__ = encoding.strmethod(__bytes__) |
|
195 | __repr__ = encoding.strmethod(__bytes__) | |
197 |
|
196 | |||
198 |
|
197 | |||
199 | # The order of the headers in 'hg export' HG patches: |
|
198 | # The order of the headers in 'hg export' HG patches: | |
200 | HGHEADERS = [ |
|
199 | HGHEADERS = [ | |
201 | # '# HG changeset patch', |
|
200 | # '# HG changeset patch', | |
202 | b'# User ', |
|
201 | b'# User ', | |
203 | b'# Date ', |
|
202 | b'# Date ', | |
204 | b'# ', |
|
203 | b'# ', | |
205 | b'# Branch ', |
|
204 | b'# Branch ', | |
206 | b'# Node ID ', |
|
205 | b'# Node ID ', | |
207 | b'# Parent ', # can occur twice for merges - but that is not relevant for mq |
|
206 | b'# Parent ', # can occur twice for merges - but that is not relevant for mq | |
208 | ] |
|
207 | ] | |
209 | # The order of headers in plain 'mail style' patches: |
|
208 | # The order of headers in plain 'mail style' patches: | |
210 | PLAINHEADERS = { |
|
209 | PLAINHEADERS = { | |
211 | b'from': 0, |
|
210 | b'from': 0, | |
212 | b'date': 1, |
|
211 | b'date': 1, | |
213 | b'subject': 2, |
|
212 | b'subject': 2, | |
214 | } |
|
213 | } | |
215 |
|
214 | |||
216 |
|
215 | |||
217 | def inserthgheader(lines, header, value): |
|
216 | def inserthgheader(lines, header, value): | |
218 | """Assuming lines contains a HG patch header, add a header line with value. |
|
217 | """Assuming lines contains a HG patch header, add a header line with value. | |
219 | >>> try: inserthgheader([], b'# Date ', b'z') |
|
218 | >>> try: inserthgheader([], b'# Date ', b'z') | |
220 | ... except ValueError as inst: print("oops") |
|
219 | ... except ValueError as inst: print("oops") | |
221 | oops |
|
220 | oops | |
222 | >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z') |
|
221 | >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z') | |
223 | ['# HG changeset patch', '# Date z'] |
|
222 | ['# HG changeset patch', '# Date z'] | |
224 | >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z') |
|
223 | >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z') | |
225 | ['# HG changeset patch', '# Date z', ''] |
|
224 | ['# HG changeset patch', '# Date z', ''] | |
226 | >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z') |
|
225 | >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z') | |
227 | ['# HG changeset patch', '# User y', '# Date z'] |
|
226 | ['# HG changeset patch', '# User y', '# Date z'] | |
228 | >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'], |
|
227 | >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'], | |
229 | ... b'# User ', b'z') |
|
228 | ... b'# User ', b'z') | |
230 | ['# HG changeset patch', '# Date x', '# User z'] |
|
229 | ['# HG changeset patch', '# Date x', '# User z'] | |
231 | >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z') |
|
230 | >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z') | |
232 | ['# HG changeset patch', '# Date z'] |
|
231 | ['# HG changeset patch', '# Date z'] | |
233 | >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'], |
|
232 | >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'], | |
234 | ... b'# Date ', b'z') |
|
233 | ... b'# Date ', b'z') | |
235 | ['# HG changeset patch', '# Date z', '', '# Date y'] |
|
234 | ['# HG changeset patch', '# Date z', '', '# Date y'] | |
236 | >>> inserthgheader([b'# HG changeset patch', b'# Parent y'], |
|
235 | >>> inserthgheader([b'# HG changeset patch', b'# Parent y'], | |
237 | ... b'# Date ', b'z') |
|
236 | ... b'# Date ', b'z') | |
238 | ['# HG changeset patch', '# Date z', '# Parent y'] |
|
237 | ['# HG changeset patch', '# Date z', '# Parent y'] | |
239 | """ |
|
238 | """ | |
240 | start = lines.index(b'# HG changeset patch') + 1 |
|
239 | start = lines.index(b'# HG changeset patch') + 1 | |
241 | newindex = HGHEADERS.index(header) |
|
240 | newindex = HGHEADERS.index(header) | |
242 | bestpos = len(lines) |
|
241 | bestpos = len(lines) | |
243 | for i in range(start, len(lines)): |
|
242 | for i in range(start, len(lines)): | |
244 | line = lines[i] |
|
243 | line = lines[i] | |
245 | if not line.startswith(b'# '): |
|
244 | if not line.startswith(b'# '): | |
246 | bestpos = min(bestpos, i) |
|
245 | bestpos = min(bestpos, i) | |
247 | break |
|
246 | break | |
248 | for lineindex, h in enumerate(HGHEADERS): |
|
247 | for lineindex, h in enumerate(HGHEADERS): | |
249 | if line.startswith(h): |
|
248 | if line.startswith(h): | |
250 | if lineindex == newindex: |
|
249 | if lineindex == newindex: | |
251 | lines[i] = header + value |
|
250 | lines[i] = header + value | |
252 | return lines |
|
251 | return lines | |
253 | if lineindex > newindex: |
|
252 | if lineindex > newindex: | |
254 | bestpos = min(bestpos, i) |
|
253 | bestpos = min(bestpos, i) | |
255 | break # next line |
|
254 | break # next line | |
256 | lines.insert(bestpos, header + value) |
|
255 | lines.insert(bestpos, header + value) | |
257 | return lines |
|
256 | return lines | |
258 |
|
257 | |||
259 |
|
258 | |||
260 | def insertplainheader(lines, header, value): |
|
259 | def insertplainheader(lines, header, value): | |
261 | """For lines containing a plain patch header, add a header line with value. |
|
260 | """For lines containing a plain patch header, add a header line with value. | |
262 | >>> insertplainheader([], b'Date', b'z') |
|
261 | >>> insertplainheader([], b'Date', b'z') | |
263 | ['Date: z'] |
|
262 | ['Date: z'] | |
264 | >>> insertplainheader([b''], b'Date', b'z') |
|
263 | >>> insertplainheader([b''], b'Date', b'z') | |
265 | ['Date: z', ''] |
|
264 | ['Date: z', ''] | |
266 | >>> insertplainheader([b'x'], b'Date', b'z') |
|
265 | >>> insertplainheader([b'x'], b'Date', b'z') | |
267 | ['Date: z', '', 'x'] |
|
266 | ['Date: z', '', 'x'] | |
268 | >>> insertplainheader([b'From: y', b'x'], b'Date', b'z') |
|
267 | >>> insertplainheader([b'From: y', b'x'], b'Date', b'z') | |
269 | ['From: y', 'Date: z', '', 'x'] |
|
268 | ['From: y', 'Date: z', '', 'x'] | |
270 | >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z') |
|
269 | >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z') | |
271 | [' date : x', 'From: z', ''] |
|
270 | [' date : x', 'From: z', ''] | |
272 | >>> insertplainheader([b'', b'Date: y'], b'Date', b'z') |
|
271 | >>> insertplainheader([b'', b'Date: y'], b'Date', b'z') | |
273 | ['Date: z', '', 'Date: y'] |
|
272 | ['Date: z', '', 'Date: y'] | |
274 | >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y') |
|
273 | >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y') | |
275 | ['From: y', 'foo: bar', 'DATE: z', '', 'x'] |
|
274 | ['From: y', 'foo: bar', 'DATE: z', '', 'x'] | |
276 | """ |
|
275 | """ | |
277 | newprio = PLAINHEADERS[header.lower()] |
|
276 | newprio = PLAINHEADERS[header.lower()] | |
278 | bestpos = len(lines) |
|
277 | bestpos = len(lines) | |
279 | for i, line in enumerate(lines): |
|
278 | for i, line in enumerate(lines): | |
280 | if b':' in line: |
|
279 | if b':' in line: | |
281 | lheader = line.split(b':', 1)[0].strip().lower() |
|
280 | lheader = line.split(b':', 1)[0].strip().lower() | |
282 | lprio = PLAINHEADERS.get(lheader, newprio + 1) |
|
281 | lprio = PLAINHEADERS.get(lheader, newprio + 1) | |
283 | if lprio == newprio: |
|
282 | if lprio == newprio: | |
284 | lines[i] = b'%s: %s' % (header, value) |
|
283 | lines[i] = b'%s: %s' % (header, value) | |
285 | return lines |
|
284 | return lines | |
286 | if lprio > newprio and i < bestpos: |
|
285 | if lprio > newprio and i < bestpos: | |
287 | bestpos = i |
|
286 | bestpos = i | |
288 | else: |
|
287 | else: | |
289 | if line: |
|
288 | if line: | |
290 | lines.insert(i, b'') |
|
289 | lines.insert(i, b'') | |
291 | if i < bestpos: |
|
290 | if i < bestpos: | |
292 | bestpos = i |
|
291 | bestpos = i | |
293 | break |
|
292 | break | |
294 | lines.insert(bestpos, b'%s: %s' % (header, value)) |
|
293 | lines.insert(bestpos, b'%s: %s' % (header, value)) | |
295 | return lines |
|
294 | return lines | |
296 |
|
295 | |||
297 |
|
296 | |||
298 | class patchheader(object): |
|
297 | class patchheader(object): | |
299 | def __init__(self, pf, plainmode=False): |
|
298 | def __init__(self, pf, plainmode=False): | |
300 | def eatdiff(lines): |
|
299 | def eatdiff(lines): | |
301 | while lines: |
|
300 | while lines: | |
302 | l = lines[-1] |
|
301 | l = lines[-1] | |
303 | if ( |
|
302 | if ( | |
304 | l.startswith(b"diff -") |
|
303 | l.startswith(b"diff -") | |
305 | or l.startswith(b"Index:") |
|
304 | or l.startswith(b"Index:") | |
306 | or l.startswith(b"===========") |
|
305 | or l.startswith(b"===========") | |
307 | ): |
|
306 | ): | |
308 | del lines[-1] |
|
307 | del lines[-1] | |
309 | else: |
|
308 | else: | |
310 | break |
|
309 | break | |
311 |
|
310 | |||
312 | def eatempty(lines): |
|
311 | def eatempty(lines): | |
313 | while lines: |
|
312 | while lines: | |
314 | if not lines[-1].strip(): |
|
313 | if not lines[-1].strip(): | |
315 | del lines[-1] |
|
314 | del lines[-1] | |
316 | else: |
|
315 | else: | |
317 | break |
|
316 | break | |
318 |
|
317 | |||
319 | message = [] |
|
318 | message = [] | |
320 | comments = [] |
|
319 | comments = [] | |
321 | user = None |
|
320 | user = None | |
322 | date = None |
|
321 | date = None | |
323 | parent = None |
|
322 | parent = None | |
324 | format = None |
|
323 | format = None | |
325 | subject = None |
|
324 | subject = None | |
326 | branch = None |
|
325 | branch = None | |
327 | nodeid = None |
|
326 | nodeid = None | |
328 | diffstart = 0 |
|
327 | diffstart = 0 | |
329 |
|
328 | |||
330 | for line in open(pf, b'rb'): |
|
329 | for line in open(pf, b'rb'): | |
331 | line = line.rstrip() |
|
330 | line = line.rstrip() | |
332 | if line.startswith(b'diff --git') or ( |
|
331 | if line.startswith(b'diff --git') or ( | |
333 | diffstart and line.startswith(b'+++ ') |
|
332 | diffstart and line.startswith(b'+++ ') | |
334 | ): |
|
333 | ): | |
335 | diffstart = 2 |
|
334 | diffstart = 2 | |
336 | break |
|
335 | break | |
337 | diffstart = 0 # reset |
|
336 | diffstart = 0 # reset | |
338 | if line.startswith(b"--- "): |
|
337 | if line.startswith(b"--- "): | |
339 | diffstart = 1 |
|
338 | diffstart = 1 | |
340 | continue |
|
339 | continue | |
341 | elif format == b"hgpatch": |
|
340 | elif format == b"hgpatch": | |
342 | # parse values when importing the result of an hg export |
|
341 | # parse values when importing the result of an hg export | |
343 | if line.startswith(b"# User "): |
|
342 | if line.startswith(b"# User "): | |
344 | user = line[7:] |
|
343 | user = line[7:] | |
345 | elif line.startswith(b"# Date "): |
|
344 | elif line.startswith(b"# Date "): | |
346 | date = line[7:] |
|
345 | date = line[7:] | |
347 | elif line.startswith(b"# Parent "): |
|
346 | elif line.startswith(b"# Parent "): | |
348 | parent = line[9:].lstrip() # handle double trailing space |
|
347 | parent = line[9:].lstrip() # handle double trailing space | |
349 | elif line.startswith(b"# Branch "): |
|
348 | elif line.startswith(b"# Branch "): | |
350 | branch = line[9:] |
|
349 | branch = line[9:] | |
351 | elif line.startswith(b"# Node ID "): |
|
350 | elif line.startswith(b"# Node ID "): | |
352 | nodeid = line[10:] |
|
351 | nodeid = line[10:] | |
353 | elif not line.startswith(b"# ") and line: |
|
352 | elif not line.startswith(b"# ") and line: | |
354 | message.append(line) |
|
353 | message.append(line) | |
355 | format = None |
|
354 | format = None | |
356 | elif line == b'# HG changeset patch': |
|
355 | elif line == b'# HG changeset patch': | |
357 | message = [] |
|
356 | message = [] | |
358 | format = b"hgpatch" |
|
357 | format = b"hgpatch" | |
359 | elif format != b"tagdone" and ( |
|
358 | elif format != b"tagdone" and ( | |
360 | line.startswith(b"Subject: ") or line.startswith(b"subject: ") |
|
359 | line.startswith(b"Subject: ") or line.startswith(b"subject: ") | |
361 | ): |
|
360 | ): | |
362 | subject = line[9:] |
|
361 | subject = line[9:] | |
363 | format = b"tag" |
|
362 | format = b"tag" | |
364 | elif format != b"tagdone" and ( |
|
363 | elif format != b"tagdone" and ( | |
365 | line.startswith(b"From: ") or line.startswith(b"from: ") |
|
364 | line.startswith(b"From: ") or line.startswith(b"from: ") | |
366 | ): |
|
365 | ): | |
367 | user = line[6:] |
|
366 | user = line[6:] | |
368 | format = b"tag" |
|
367 | format = b"tag" | |
369 | elif format != b"tagdone" and ( |
|
368 | elif format != b"tagdone" and ( | |
370 | line.startswith(b"Date: ") or line.startswith(b"date: ") |
|
369 | line.startswith(b"Date: ") or line.startswith(b"date: ") | |
371 | ): |
|
370 | ): | |
372 | date = line[6:] |
|
371 | date = line[6:] | |
373 | format = b"tag" |
|
372 | format = b"tag" | |
374 | elif format == b"tag" and line == b"": |
|
373 | elif format == b"tag" and line == b"": | |
375 | # when looking for tags (subject: from: etc) they |
|
374 | # when looking for tags (subject: from: etc) they | |
376 | # end once you find a blank line in the source |
|
375 | # end once you find a blank line in the source | |
377 | format = b"tagdone" |
|
376 | format = b"tagdone" | |
378 | elif message or line: |
|
377 | elif message or line: | |
379 | message.append(line) |
|
378 | message.append(line) | |
380 | comments.append(line) |
|
379 | comments.append(line) | |
381 |
|
380 | |||
382 | eatdiff(message) |
|
381 | eatdiff(message) | |
383 | eatdiff(comments) |
|
382 | eatdiff(comments) | |
384 | # Remember the exact starting line of the patch diffs before consuming |
|
383 | # Remember the exact starting line of the patch diffs before consuming | |
385 | # empty lines, for external use by TortoiseHg and others |
|
384 | # empty lines, for external use by TortoiseHg and others | |
386 | self.diffstartline = len(comments) |
|
385 | self.diffstartline = len(comments) | |
387 | eatempty(message) |
|
386 | eatempty(message) | |
388 | eatempty(comments) |
|
387 | eatempty(comments) | |
389 |
|
388 | |||
390 | # make sure message isn't empty |
|
389 | # make sure message isn't empty | |
391 | if format and format.startswith(b"tag") and subject: |
|
390 | if format and format.startswith(b"tag") and subject: | |
392 | message.insert(0, subject) |
|
391 | message.insert(0, subject) | |
393 |
|
392 | |||
394 | self.message = message |
|
393 | self.message = message | |
395 | self.comments = comments |
|
394 | self.comments = comments | |
396 | self.user = user |
|
395 | self.user = user | |
397 | self.date = date |
|
396 | self.date = date | |
398 | self.parent = parent |
|
397 | self.parent = parent | |
399 | # nodeid and branch are for external use by TortoiseHg and others |
|
398 | # nodeid and branch are for external use by TortoiseHg and others | |
400 | self.nodeid = nodeid |
|
399 | self.nodeid = nodeid | |
401 | self.branch = branch |
|
400 | self.branch = branch | |
402 | self.haspatch = diffstart > 1 |
|
401 | self.haspatch = diffstart > 1 | |
403 | self.plainmode = ( |
|
402 | self.plainmode = ( | |
404 | plainmode |
|
403 | plainmode | |
405 | or b'# HG changeset patch' not in self.comments |
|
404 | or b'# HG changeset patch' not in self.comments | |
406 | and any( |
|
405 | and any( | |
407 | c.startswith(b'Date: ') or c.startswith(b'From: ') |
|
406 | c.startswith(b'Date: ') or c.startswith(b'From: ') | |
408 | for c in self.comments |
|
407 | for c in self.comments | |
409 | ) |
|
408 | ) | |
410 | ) |
|
409 | ) | |
411 |
|
410 | |||
412 | def setuser(self, user): |
|
411 | def setuser(self, user): | |
413 | try: |
|
412 | try: | |
414 | inserthgheader(self.comments, b'# User ', user) |
|
413 | inserthgheader(self.comments, b'# User ', user) | |
415 | except ValueError: |
|
414 | except ValueError: | |
416 | if self.plainmode: |
|
415 | if self.plainmode: | |
417 | insertplainheader(self.comments, b'From', user) |
|
416 | insertplainheader(self.comments, b'From', user) | |
418 | else: |
|
417 | else: | |
419 | tmp = [b'# HG changeset patch', b'# User ' + user] |
|
418 | tmp = [b'# HG changeset patch', b'# User ' + user] | |
420 | self.comments = tmp + self.comments |
|
419 | self.comments = tmp + self.comments | |
421 | self.user = user |
|
420 | self.user = user | |
422 |
|
421 | |||
423 | def setdate(self, date): |
|
422 | def setdate(self, date): | |
424 | try: |
|
423 | try: | |
425 | inserthgheader(self.comments, b'# Date ', date) |
|
424 | inserthgheader(self.comments, b'# Date ', date) | |
426 | except ValueError: |
|
425 | except ValueError: | |
427 | if self.plainmode: |
|
426 | if self.plainmode: | |
428 | insertplainheader(self.comments, b'Date', date) |
|
427 | insertplainheader(self.comments, b'Date', date) | |
429 | else: |
|
428 | else: | |
430 | tmp = [b'# HG changeset patch', b'# Date ' + date] |
|
429 | tmp = [b'# HG changeset patch', b'# Date ' + date] | |
431 | self.comments = tmp + self.comments |
|
430 | self.comments = tmp + self.comments | |
432 | self.date = date |
|
431 | self.date = date | |
433 |
|
432 | |||
434 | def setparent(self, parent): |
|
433 | def setparent(self, parent): | |
435 | try: |
|
434 | try: | |
436 | inserthgheader(self.comments, b'# Parent ', parent) |
|
435 | inserthgheader(self.comments, b'# Parent ', parent) | |
437 | except ValueError: |
|
436 | except ValueError: | |
438 | if not self.plainmode: |
|
437 | if not self.plainmode: | |
439 | tmp = [b'# HG changeset patch', b'# Parent ' + parent] |
|
438 | tmp = [b'# HG changeset patch', b'# Parent ' + parent] | |
440 | self.comments = tmp + self.comments |
|
439 | self.comments = tmp + self.comments | |
441 | self.parent = parent |
|
440 | self.parent = parent | |
442 |
|
441 | |||
443 | def setmessage(self, message): |
|
442 | def setmessage(self, message): | |
444 | if self.comments: |
|
443 | if self.comments: | |
445 | self._delmsg() |
|
444 | self._delmsg() | |
446 | self.message = [message] |
|
445 | self.message = [message] | |
447 | if message: |
|
446 | if message: | |
448 | if self.plainmode and self.comments and self.comments[-1]: |
|
447 | if self.plainmode and self.comments and self.comments[-1]: | |
449 | self.comments.append(b'') |
|
448 | self.comments.append(b'') | |
450 | self.comments.append(message) |
|
449 | self.comments.append(message) | |
451 |
|
450 | |||
452 | def __bytes__(self): |
|
451 | def __bytes__(self): | |
453 | s = b'\n'.join(self.comments).rstrip() |
|
452 | s = b'\n'.join(self.comments).rstrip() | |
454 | if not s: |
|
453 | if not s: | |
455 | return b'' |
|
454 | return b'' | |
456 | return s + b'\n\n' |
|
455 | return s + b'\n\n' | |
457 |
|
456 | |||
458 | __str__ = encoding.strmethod(__bytes__) |
|
457 | __str__ = encoding.strmethod(__bytes__) | |
459 |
|
458 | |||
460 | def _delmsg(self): |
|
459 | def _delmsg(self): | |
461 | """Remove existing message, keeping the rest of the comments fields. |
|
460 | """Remove existing message, keeping the rest of the comments fields. | |
462 | If comments contains 'subject: ', message will prepend |
|
461 | If comments contains 'subject: ', message will prepend | |
463 | the field and a blank line.""" |
|
462 | the field and a blank line.""" | |
464 | if self.message: |
|
463 | if self.message: | |
465 | subj = b'subject: ' + self.message[0].lower() |
|
464 | subj = b'subject: ' + self.message[0].lower() | |
466 | for i in pycompat.xrange(len(self.comments)): |
|
465 | for i in pycompat.xrange(len(self.comments)): | |
467 | if subj == self.comments[i].lower(): |
|
466 | if subj == self.comments[i].lower(): | |
468 | del self.comments[i] |
|
467 | del self.comments[i] | |
469 | self.message = self.message[2:] |
|
468 | self.message = self.message[2:] | |
470 | break |
|
469 | break | |
471 | ci = 0 |
|
470 | ci = 0 | |
472 | for mi in self.message: |
|
471 | for mi in self.message: | |
473 | while mi != self.comments[ci]: |
|
472 | while mi != self.comments[ci]: | |
474 | ci += 1 |
|
473 | ci += 1 | |
475 | del self.comments[ci] |
|
474 | del self.comments[ci] | |
476 |
|
475 | |||
477 |
|
476 | |||
478 | def newcommit(repo, phase, *args, **kwargs): |
|
477 | def newcommit(repo, phase, *args, **kwargs): | |
479 | """helper dedicated to ensure a commit respect mq.secret setting |
|
478 | """helper dedicated to ensure a commit respect mq.secret setting | |
480 |
|
479 | |||
481 | It should be used instead of repo.commit inside the mq source for operations |

480 | It should be used instead of repo.commit inside the mq source for operations | |
482 | creating new changesets. |

481 | creating new changesets. | |
483 | """ |
|
482 | """ | |
484 | repo = repo.unfiltered() |
|
483 | repo = repo.unfiltered() | |
485 | if phase is None: |
|
484 | if phase is None: | |
486 | if repo.ui.configbool(b'mq', b'secret'): |
|
485 | if repo.ui.configbool(b'mq', b'secret'): | |
487 | phase = phases.secret |
|
486 | phase = phases.secret | |
488 | overrides = {(b'ui', b'allowemptycommit'): True} |
|
487 | overrides = {(b'ui', b'allowemptycommit'): True} | |
489 | if phase is not None: |
|
488 | if phase is not None: | |
490 | overrides[(b'phases', b'new-commit')] = phase |
|
489 | overrides[(b'phases', b'new-commit')] = phase | |
491 | with repo.ui.configoverride(overrides, b'mq'): |
|
490 | with repo.ui.configoverride(overrides, b'mq'): | |
492 | repo.ui.setconfig(b'ui', b'allowemptycommit', True) |
|
491 | repo.ui.setconfig(b'ui', b'allowemptycommit', True) | |
493 | return repo.commit(*args, **kwargs) |
|
492 | return repo.commit(*args, **kwargs) | |
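newcommit() leans on ui.configoverride so that ``phases.new-commit`` and
``ui.allowemptycommit`` apply only for the duration of the commit. A
self-contained sketch of that scoped-override pattern, assuming a plain dict
in place of Mercurial's ui configuration::

    import contextlib

    config = {(b'phases', b'new-commit'): b'draft'}

    @contextlib.contextmanager
    def configoverride(overrides):
        saved = {k: config.get(k) for k in overrides}
        config.update(overrides)          # apply temporary values
        try:
            yield
        finally:
            config.update(saved)          # restore on the way out

    with configoverride({(b'phases', b'new-commit'): b'secret'}):
        assert config[(b'phases', b'new-commit')] == b'secret'
    assert config[(b'phases', b'new-commit')] == b'draft'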
494 |
|
493 | |||
495 |
|
494 | |||
496 | class AbortNoCleanup(error.Abort): |
|
495 | class AbortNoCleanup(error.Abort): | |
497 | pass |
|
496 | pass | |
498 |
|
497 | |||
499 |
|
498 | |||
500 | class queue(object): |
|
499 | class queue(object): | |
501 | def __init__(self, ui, baseui, path, patchdir=None): |
|
500 | def __init__(self, ui, baseui, path, patchdir=None): | |
502 | self.basepath = path |
|
501 | self.basepath = path | |
503 | try: |
|
502 | try: | |
504 | with open(os.path.join(path, b'patches.queue'), 'rb') as fh: |
|
503 | with open(os.path.join(path, b'patches.queue'), 'rb') as fh: | |
505 | cur = fh.read().rstrip() |
|
504 | cur = fh.read().rstrip() | |
506 |
|
505 | |||
507 | if not cur: |
|
506 | if not cur: | |
508 | curpath = os.path.join(path, b'patches') |
|
507 | curpath = os.path.join(path, b'patches') | |
509 | else: |
|
508 | else: | |
510 | curpath = os.path.join(path, b'patches-' + cur) |
|
509 | curpath = os.path.join(path, b'patches-' + cur) | |
511 | except IOError: |
|
510 | except IOError: | |
512 | curpath = os.path.join(path, b'patches') |
|
511 | curpath = os.path.join(path, b'patches') | |
513 | self.path = patchdir or curpath |
|
512 | self.path = patchdir or curpath | |
514 | self.opener = vfsmod.vfs(self.path) |
|
513 | self.opener = vfsmod.vfs(self.path) | |
515 | self.ui = ui |
|
514 | self.ui = ui | |
516 | self.baseui = baseui |
|
515 | self.baseui = baseui | |
517 | self.applieddirty = False |
|
516 | self.applieddirty = False | |
518 | self.seriesdirty = False |
|
517 | self.seriesdirty = False | |
519 | self.added = [] |
|
518 | self.added = [] | |
520 | self.seriespath = b"series" |
|
519 | self.seriespath = b"series" | |
521 | self.statuspath = b"status" |
|
520 | self.statuspath = b"status" | |
522 | self.guardspath = b"guards" |
|
521 | self.guardspath = b"guards" | |
523 | self.activeguards = None |
|
522 | self.activeguards = None | |
524 | self.guardsdirty = False |
|
523 | self.guardsdirty = False | |
525 | # Handle mq.git as a bool with extended values |
|
524 | # Handle mq.git as a bool with extended values | |
526 | gitmode = ui.config(b'mq', b'git').lower() |
|
525 | gitmode = ui.config(b'mq', b'git').lower() | |
527 | boolmode = stringutil.parsebool(gitmode) |
|
526 | boolmode = stringutil.parsebool(gitmode) | |
528 | if boolmode is not None: |
|
527 | if boolmode is not None: | |
529 | if boolmode: |
|
528 | if boolmode: | |
530 | gitmode = b'yes' |
|
529 | gitmode = b'yes' | |
531 | else: |
|
530 | else: | |
532 | gitmode = b'no' |
|
531 | gitmode = b'no' | |
533 | self.gitmode = gitmode |
|
532 | self.gitmode = gitmode | |
534 | # deprecated config: mq.plain |
|
533 | # deprecated config: mq.plain | |
535 | self.plainmode = ui.configbool(b'mq', b'plain') |
|
534 | self.plainmode = ui.configbool(b'mq', b'plain') | |
536 | self.checkapplied = True |
|
535 | self.checkapplied = True | |
537 |
|
536 | |||
538 | @util.propertycache |
|
537 | @util.propertycache | |
539 | def applied(self): |
|
538 | def applied(self): | |
540 | def parselines(lines): |
|
539 | def parselines(lines): | |
541 | for l in lines: |
|
540 | for l in lines: | |
542 | entry = l.split(b':', 1) |
|
541 | entry = l.split(b':', 1) | |
543 | if len(entry) > 1: |
|
542 | if len(entry) > 1: | |
544 | n, name = entry |
|
543 | n, name = entry | |
545 | yield statusentry(bin(n), name) |
|
544 | yield statusentry(bin(n), name) | |
546 | elif l.strip(): |
|
545 | elif l.strip(): | |
547 | self.ui.warn( |
|
546 | self.ui.warn( | |
548 | _(b'malformed mq status line: %s\n') |

547 | _(b'malformed mq status line: %s\n') | |
549 | % stringutil.pprint(entry) |
|
548 | % stringutil.pprint(entry) | |
550 | ) |
|
549 | ) | |
551 | # else we ignore empty lines |
|
550 | # else we ignore empty lines | |
552 |
|
551 | |||
553 | try: |
|
552 | try: | |
554 | lines = self.opener.read(self.statuspath).splitlines() |
|
553 | lines = self.opener.read(self.statuspath).splitlines() | |
555 | return list(parselines(lines)) |
|
554 | return list(parselines(lines)) | |
556 | except IOError as e: |
|
555 | except IOError as e: | |
557 | if e.errno == errno.ENOENT: |
|
556 | if e.errno == errno.ENOENT: | |
558 | return [] |
|
557 | return [] | |
559 | raise |
|
558 | raise | |
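Each status line parsed here has the shape statusentry.__bytes__ produces
above: a 40-character hex node, a colon, then the patch name. A short worked
example (the node and patch name are made up)::

    line = b'0123456789abcdef0123456789abcdef01234567:fix-encoding.patch'
    node, name = line.split(b':', 1)   # split on the first colon only
    assert len(node) == 40 and name == b'fix-encoding.patch'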
560 |
|
559 | |||
561 | @util.propertycache |
|
560 | @util.propertycache | |
562 | def fullseries(self): |
|
561 | def fullseries(self): | |
563 | try: |
|
562 | try: | |
564 | return self.opener.read(self.seriespath).splitlines() |
|
563 | return self.opener.read(self.seriespath).splitlines() | |
565 | except IOError as e: |
|
564 | except IOError as e: | |
566 | if e.errno == errno.ENOENT: |
|
565 | if e.errno == errno.ENOENT: | |
567 | return [] |
|
566 | return [] | |
568 | raise |
|
567 | raise | |
569 |
|
568 | |||
570 | @util.propertycache |
|
569 | @util.propertycache | |
571 | def series(self): |
|
570 | def series(self): | |
572 | self.parseseries() |
|
571 | self.parseseries() | |
573 | return self.series |
|
572 | return self.series | |
574 |
|
573 | |||
575 | @util.propertycache |
|
574 | @util.propertycache | |
576 | def seriesguards(self): |
|
575 | def seriesguards(self): | |
577 | self.parseseries() |
|
576 | self.parseseries() | |
578 | return self.seriesguards |
|
577 | return self.seriesguards | |
579 |
|
578 | |||
580 | def invalidate(self): |
|
579 | def invalidate(self): | |
581 | for a in 'applied fullseries series seriesguards'.split(): |
|
580 | for a in 'applied fullseries series seriesguards'.split(): | |
582 | if a in self.__dict__: |
|
581 | if a in self.__dict__: | |
583 | delattr(self, a) |
|
582 | delattr(self, a) | |
584 | self.applieddirty = False |
|
583 | self.applieddirty = False | |
585 | self.seriesdirty = False |
|
584 | self.seriesdirty = False | |
586 | self.guardsdirty = False |
|
585 | self.guardsdirty = False | |
587 | self.activeguards = None |
|
586 | self.activeguards = None | |
588 |
|
587 | |||
589 | def diffopts(self, opts=None, patchfn=None, plain=False): |
|
588 | def diffopts(self, opts=None, patchfn=None, plain=False): | |
590 | """Return diff options tweaked for this mq use, possibly upgrading to |
|
589 | """Return diff options tweaked for this mq use, possibly upgrading to | |
591 | git format, and possibly plain and without lossy options.""" |
|
590 | git format, and possibly plain and without lossy options.""" | |
592 | diffopts = patchmod.difffeatureopts( |
|
591 | diffopts = patchmod.difffeatureopts( | |
593 | self.ui, |
|
592 | self.ui, | |
594 | opts, |
|
593 | opts, | |
595 | git=True, |
|
594 | git=True, | |
596 | whitespace=not plain, |
|
595 | whitespace=not plain, | |
597 | formatchanging=not plain, |
|
596 | formatchanging=not plain, | |
598 | ) |
|
597 | ) | |
599 | if self.gitmode == b'auto': |
|
598 | if self.gitmode == b'auto': | |
600 | diffopts.upgrade = True |
|
599 | diffopts.upgrade = True | |
601 | elif self.gitmode == b'keep': |
|
600 | elif self.gitmode == b'keep': | |
602 | pass |
|
601 | pass | |
603 | elif self.gitmode in (b'yes', b'no'): |
|
602 | elif self.gitmode in (b'yes', b'no'): | |
604 | diffopts.git = self.gitmode == b'yes' |
|
603 | diffopts.git = self.gitmode == b'yes' | |
605 | else: |
|
604 | else: | |
606 | raise error.Abort( |
|
605 | raise error.Abort( | |
607 | _(b'mq.git option can be auto/keep/yes/no, got %s') |

606 | _(b'mq.git option can be auto/keep/yes/no, got %s') | |
608 | % self.gitmode |
|
607 | % self.gitmode | |
609 | ) |
|
608 | ) | |
610 | if patchfn: |
|
609 | if patchfn: | |
611 | diffopts = self.patchopts(diffopts, patchfn) |
|
610 | diffopts = self.patchopts(diffopts, patchfn) | |
612 | return diffopts |
|
611 | return diffopts | |
613 |
|
612 | |||
614 | def patchopts(self, diffopts, *patches): |
|
613 | def patchopts(self, diffopts, *patches): | |
615 | """Return a copy of input diff options with git set to true if |
|
614 | """Return a copy of input diff options with git set to true if | |
616 | referenced patch is a git patch and should be preserved as such. |
|
615 | referenced patch is a git patch and should be preserved as such. | |
617 | """ |
|
616 | """ | |
618 | diffopts = diffopts.copy() |
|
617 | diffopts = diffopts.copy() | |
619 | if not diffopts.git and self.gitmode == b'keep': |
|
618 | if not diffopts.git and self.gitmode == b'keep': | |
620 | for patchfn in patches: |
|
619 | for patchfn in patches: | |
621 | patchf = self.opener(patchfn, b'r') |
|
620 | patchf = self.opener(patchfn, b'r') | |
622 | # if the patch was a git patch, refresh it as a git patch |
|
621 | # if the patch was a git patch, refresh it as a git patch | |
623 | diffopts.git = any( |
|
622 | diffopts.git = any( | |
624 | line.startswith(b'diff --git') for line in patchf |
|
623 | line.startswith(b'diff --git') for line in patchf | |
625 | ) |
|
624 | ) | |
626 | patchf.close() |
|
625 | patchf.close() | |
627 | return diffopts |
|
626 | return diffopts | |
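In 'keep' mode, patchopts() decides whether to stay in git format by scanning
the existing patch file for a git diff header. Reduced to its essentials (the
patch text is illustrative)::

    patchtext = b'diff --git a/f b/f\n--- a/f\n+++ b/f\n'
    isgit = any(line.startswith(b'diff --git')
                for line in patchtext.splitlines())
    assert isgit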
628 |
|
627 | |||
629 | def join(self, *p): |
|
628 | def join(self, *p): | |
630 | return os.path.join(self.path, *p) |
|
629 | return os.path.join(self.path, *p) | |
631 |
|
630 | |||
632 | def findseries(self, patch): |
|
631 | def findseries(self, patch): | |
633 | def matchpatch(l): |
|
632 | def matchpatch(l): | |
634 | l = l.split(b'#', 1)[0] |
|
633 | l = l.split(b'#', 1)[0] | |
635 | return l.strip() == patch |
|
634 | return l.strip() == patch | |
636 |
|
635 | |||
637 | for index, l in enumerate(self.fullseries): |
|
636 | for index, l in enumerate(self.fullseries): | |
638 | if matchpatch(l): |
|
637 | if matchpatch(l): | |
639 | return index |
|
638 | return index | |
640 | return None |
|
639 | return None | |
641 |
|
640 | |||
642 | guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)') |
|
641 | guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)') | |
643 |
|
642 | |||
644 | def parseseries(self): |
|
643 | def parseseries(self): | |
645 | self.series = [] |
|
644 | self.series = [] | |
646 | self.seriesguards = [] |
|
645 | self.seriesguards = [] | |
647 | for l in self.fullseries: |
|
646 | for l in self.fullseries: | |
648 | h = l.find(b'#') |
|
647 | h = l.find(b'#') | |
649 | if h == -1: |
|
648 | if h == -1: | |
650 | patch = l |
|
649 | patch = l | |
651 | comment = b'' |
|
650 | comment = b'' | |
652 | elif h == 0: |
|
651 | elif h == 0: | |
653 | continue |
|
652 | continue | |
654 | else: |
|
653 | else: | |
655 | patch = l[:h] |
|
654 | patch = l[:h] | |
656 | comment = l[h:] |
|
655 | comment = l[h:] | |
657 | patch = patch.strip() |
|
656 | patch = patch.strip() | |
658 | if patch: |
|
657 | if patch: | |
659 | if patch in self.series: |
|
658 | if patch in self.series: | |
660 | raise error.Abort( |
|
659 | raise error.Abort( | |
661 | _(b'%s appears more than once in %s') |
|
660 | _(b'%s appears more than once in %s') | |
662 | % (patch, self.join(self.seriespath)) |
|
661 | % (patch, self.join(self.seriespath)) | |
663 | ) |
|
662 | ) | |
664 | self.series.append(patch) |
|
663 | self.series.append(patch) | |
665 | self.seriesguards.append(self.guard_re.findall(comment)) |
|
664 | self.seriesguards.append(self.guard_re.findall(comment)) | |
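A series line is a patch name optionally followed by a '#' comment, and
guards live inside that comment. A worked example of guard_re against an
illustrative series line::

    import re

    guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
    line = b'fix-encoding.patch #+stable #-broken  # plain comment'
    h = line.find(b'#')
    patch = line[:h].strip()             # b'fix-encoding.patch'
    guards = guard_re.findall(line[h:])  # guards, not the plain comment
    assert patch == b'fix-encoding.patch'
    assert guards == [b'+stable', b'-broken']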
666 |
|
665 | |||
667 | def checkguard(self, guard): |
|
666 | def checkguard(self, guard): | |
668 | if not guard: |
|
667 | if not guard: | |
669 | return _(b'guard cannot be an empty string') |
|
668 | return _(b'guard cannot be an empty string') | |
670 | bad_chars = b'# \t\r\n\f' |
|
669 | bad_chars = b'# \t\r\n\f' | |
671 | first = guard[0] |
|
670 | first = guard[0] | |
672 | if first in b'-+': |
|
671 | if first in b'-+': | |
673 | return _(b'guard %r starts with invalid character: %r') % ( |
|
672 | return _(b'guard %r starts with invalid character: %r') % ( | |
674 | guard, |
|
673 | guard, | |
675 | first, |
|
674 | first, | |
676 | ) |
|
675 | ) | |
677 | for c in bad_chars: |
|
676 | for c in bad_chars: | |
678 | if c in guard: |
|
677 | if c in guard: | |
679 | return _(b'invalid character in guard %r: %r') % (guard, c) |
|
678 | return _(b'invalid character in guard %r: %r') % (guard, c) | |
680 |
|
679 | |||
681 | def setactive(self, guards): |
|
680 | def setactive(self, guards): | |
682 | for guard in guards: |
|
681 | for guard in guards: | |
683 | bad = self.checkguard(guard) |
|
682 | bad = self.checkguard(guard) | |
684 | if bad: |
|
683 | if bad: | |
685 | raise error.Abort(bad) |
|
684 | raise error.Abort(bad) | |
686 | guards = sorted(set(guards)) |
|
685 | guards = sorted(set(guards)) | |
687 | self.ui.debug(b'active guards: %s\n' % b' '.join(guards)) |
|
686 | self.ui.debug(b'active guards: %s\n' % b' '.join(guards)) | |
688 | self.activeguards = guards |
|
687 | self.activeguards = guards | |
689 | self.guardsdirty = True |
|
688 | self.guardsdirty = True | |
690 |
|
689 | |||
691 | def active(self): |
|
690 | def active(self): | |
692 | if self.activeguards is None: |
|
691 | if self.activeguards is None: | |
693 | self.activeguards = [] |
|
692 | self.activeguards = [] | |
694 | try: |
|
693 | try: | |
695 | guards = self.opener.read(self.guardspath).split() |
|
694 | guards = self.opener.read(self.guardspath).split() | |
696 | except IOError as err: |
|
695 | except IOError as err: | |
697 | if err.errno != errno.ENOENT: |
|
696 | if err.errno != errno.ENOENT: | |
698 | raise |
|
697 | raise | |
699 | guards = [] |
|
698 | guards = [] | |
700 | for i, guard in enumerate(guards): |
|
699 | for i, guard in enumerate(guards): | |
701 | bad = self.checkguard(guard) |
|
700 | bad = self.checkguard(guard) | |
702 | if bad: |
|
701 | if bad: | |
703 | self.ui.warn( |
|
702 | self.ui.warn( | |
704 | b'%s:%d: %s\n' |
|
703 | b'%s:%d: %s\n' | |
705 | % (self.join(self.guardspath), i + 1, bad) |
|
704 | % (self.join(self.guardspath), i + 1, bad) | |
706 | ) |
|
705 | ) | |
707 | else: |
|
706 | else: | |
708 | self.activeguards.append(guard) |
|
707 | self.activeguards.append(guard) | |
709 | return self.activeguards |
|
708 | return self.activeguards | |
710 |
|
709 | |||
711 | def setguards(self, idx, guards): |
|
710 | def setguards(self, idx, guards): | |
712 | for g in guards: |
|
711 | for g in guards: | |
713 | if len(g) < 2: |
|
712 | if len(g) < 2: | |
714 | raise error.Abort(_(b'guard %r too short') % g) |
|
713 | raise error.Abort(_(b'guard %r too short') % g) | |
715 | if g[0] not in b'-+': |
|
714 | if g[0] not in b'-+': | |
716 | raise error.Abort(_(b'guard %r starts with invalid char') % g) |
|
715 | raise error.Abort(_(b'guard %r starts with invalid char') % g) | |
717 | bad = self.checkguard(g[1:]) |
|
716 | bad = self.checkguard(g[1:]) | |
718 | if bad: |
|
717 | if bad: | |
719 | raise error.Abort(bad) |
|
718 | raise error.Abort(bad) | |
720 | drop = self.guard_re.sub(b'', self.fullseries[idx]) |
|
719 | drop = self.guard_re.sub(b'', self.fullseries[idx]) | |
721 | self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards]) |
|
720 | self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards]) | |
722 | self.parseseries() |
|
721 | self.parseseries() | |
723 | self.seriesdirty = True |
|
722 | self.seriesdirty = True | |
724 |
|
723 | |||
725 | def pushable(self, idx): |
|
724 | def pushable(self, idx): | |
726 | if isinstance(idx, bytes): |
|
725 | if isinstance(idx, bytes): | |
727 | idx = self.series.index(idx) |
|
726 | idx = self.series.index(idx) | |
728 | patchguards = self.seriesguards[idx] |
|
727 | patchguards = self.seriesguards[idx] | |
729 | if not patchguards: |
|
728 | if not patchguards: | |
730 | return True, None |
|
729 | return True, None | |
731 | guards = self.active() |
|
730 | guards = self.active() | |
732 | exactneg = [ |
|
731 | exactneg = [ | |
733 | g for g in patchguards if g.startswith(b'-') and g[1:] in guards |
|
732 | g for g in patchguards if g.startswith(b'-') and g[1:] in guards | |
734 | ] |
|
733 | ] | |
735 | if exactneg: |
|
734 | if exactneg: | |
736 | return False, stringutil.pprint(exactneg[0]) |
|
735 | return False, stringutil.pprint(exactneg[0]) | |
737 | pos = [g for g in patchguards if g.startswith(b'+')] |
|
736 | pos = [g for g in patchguards if g.startswith(b'+')] | |
738 | exactpos = [g for g in pos if g[1:] in guards] |
|
737 | exactpos = [g for g in pos if g[1:] in guards] | |
739 | if pos: |
|
738 | if pos: | |
740 | if exactpos: |
|
739 | if exactpos: | |
741 | return True, stringutil.pprint(exactpos[0]) |
|
740 | return True, stringutil.pprint(exactpos[0]) | |
742 | return False, b' '.join([stringutil.pprint(p) for p in pos]) |
|
741 | return False, b' '.join([stringutil.pprint(p) for p in pos]) | |
743 | return True, b'' |
|
742 | return True, b'' | |
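Stated compactly: any matching negative guard blocks a patch, and if positive
guards exist at least one must match an active guard. A self-contained
restatement of that rule (plain strings instead of bytes, for brevity)::

    def is_pushable(patchguards, active):
        if any(g[1:] in active for g in patchguards if g.startswith('-')):
            return False                 # a negative guard matched
        pos = [g for g in patchguards if g.startswith('+')]
        if pos:
            return any(g[1:] in active for g in pos)
        return True                      # no guards restrict this patch

    assert is_pushable([], {'stable'})
    assert not is_pushable(['-broken'], {'broken'})
    assert is_pushable(['+stable'], {'stable'})
    assert not is_pushable(['+stable'], set())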
744 |
|
743 | |||
745 | def explainpushable(self, idx, all_patches=False): |
|
744 | def explainpushable(self, idx, all_patches=False): | |
746 | if all_patches: |
|
745 | if all_patches: | |
747 | write = self.ui.write |
|
746 | write = self.ui.write | |
748 | else: |
|
747 | else: | |
749 | write = self.ui.warn |
|
748 | write = self.ui.warn | |
750 |
|
749 | |||
751 | if all_patches or self.ui.verbose: |
|
750 | if all_patches or self.ui.verbose: | |
752 | if isinstance(idx, bytes): |
|
751 | if isinstance(idx, bytes): | |
753 | idx = self.series.index(idx) |
|
752 | idx = self.series.index(idx) | |
754 | pushable, why = self.pushable(idx) |
|
753 | pushable, why = self.pushable(idx) | |
755 | if all_patches and pushable: |
|
754 | if all_patches and pushable: | |
756 | if why is None: |
|
755 | if why is None: | |
757 | write( |
|
756 | write( | |
758 | _(b'allowing %s - no guards in effect\n') |
|
757 | _(b'allowing %s - no guards in effect\n') | |
759 | % self.series[idx] |
|
758 | % self.series[idx] | |
760 | ) |
|
759 | ) | |
761 | else: |
|
760 | else: | |
762 | if not why: |
|
761 | if not why: | |
763 | write( |
|
762 | write( | |
764 | _(b'allowing %s - no matching negative guards\n') |
|
763 | _(b'allowing %s - no matching negative guards\n') | |
765 | % self.series[idx] |
|
764 | % self.series[idx] | |
766 | ) |
|
765 | ) | |
767 | else: |
|
766 | else: | |
768 | write( |
|
767 | write( | |
769 | _(b'allowing %s - guarded by %s\n') |
|
768 | _(b'allowing %s - guarded by %s\n') | |
770 | % (self.series[idx], why) |
|
769 | % (self.series[idx], why) | |
771 | ) |
|
770 | ) | |
772 | if not pushable: |
|
771 | if not pushable: | |
773 | if why: |
|
772 | if why: | |
774 | write( |
|
773 | write( | |
775 | _(b'skipping %s - guarded by %s\n') |
|
774 | _(b'skipping %s - guarded by %s\n') | |
776 | % (self.series[idx], why) |
|
775 | % (self.series[idx], why) | |
777 | ) |
|
776 | ) | |
778 | else: |
|
777 | else: | |
779 | write( |
|
778 | write( | |
780 | _(b'skipping %s - no matching guards\n') |
|
779 | _(b'skipping %s - no matching guards\n') | |
781 | % self.series[idx] |
|
780 | % self.series[idx] | |
782 | ) |
|
781 | ) | |
783 |
|
782 | |||
784 | def savedirty(self): |
|
783 | def savedirty(self): | |
785 | def writelist(items, path): |
|
784 | def writelist(items, path): | |
786 | fp = self.opener(path, b'wb') |
|
785 | fp = self.opener(path, b'wb') | |
787 | for i in items: |
|
786 | for i in items: | |
788 | fp.write(b"%s\n" % i) |
|
787 | fp.write(b"%s\n" % i) | |
789 | fp.close() |
|
788 | fp.close() | |
790 |
|
789 | |||
791 | if self.applieddirty: |
|
790 | if self.applieddirty: | |
792 | writelist(map(bytes, self.applied), self.statuspath) |
|
791 | writelist(map(bytes, self.applied), self.statuspath) | |
793 | self.applieddirty = False |
|
792 | self.applieddirty = False | |
794 | if self.seriesdirty: |
|
793 | if self.seriesdirty: | |
795 | writelist(self.fullseries, self.seriespath) |
|
794 | writelist(self.fullseries, self.seriespath) | |
796 | self.seriesdirty = False |
|
795 | self.seriesdirty = False | |
797 | if self.guardsdirty: |
|
796 | if self.guardsdirty: | |
798 | writelist(self.activeguards, self.guardspath) |
|
797 | writelist(self.activeguards, self.guardspath) | |
799 | self.guardsdirty = False |
|
798 | self.guardsdirty = False | |
800 | if self.added: |
|
799 | if self.added: | |
801 | qrepo = self.qrepo() |
|
800 | qrepo = self.qrepo() | |
802 | if qrepo: |
|
801 | if qrepo: | |
803 | qrepo[None].add(f for f in self.added if f not in qrepo[None]) |
|
802 | qrepo[None].add(f for f in self.added if f not in qrepo[None]) | |
804 | self.added = [] |
|
803 | self.added = [] | |
805 |
|
804 | |||
806 | def removeundo(self, repo): |
|
805 | def removeundo(self, repo): | |
807 | undo = repo.sjoin(b'undo') |
|
806 | undo = repo.sjoin(b'undo') | |
808 | if not os.path.exists(undo): |
|
807 | if not os.path.exists(undo): | |
809 | return |
|
808 | return | |
810 | try: |
|
809 | try: | |
811 | os.unlink(undo) |
|
810 | os.unlink(undo) | |
812 | except OSError as inst: |
|
811 | except OSError as inst: | |
813 | self.ui.warn( |
|
812 | self.ui.warn( | |
814 | _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst) |
|
813 | _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst) | |
815 | ) |
|
814 | ) | |
816 |
|
815 | |||
817 | def backup(self, repo, files, copy=False): |
|
816 | def backup(self, repo, files, copy=False): | |
818 | # backup local changes in --force case |
|
817 | # backup local changes in --force case | |
819 | for f in sorted(files): |
|
818 | for f in sorted(files): | |
820 | absf = repo.wjoin(f) |
|
819 | absf = repo.wjoin(f) | |
821 | if os.path.lexists(absf): |
|
820 | if os.path.lexists(absf): | |
822 | absorig = scmutil.backuppath(self.ui, repo, f) |
|
821 | absorig = scmutil.backuppath(self.ui, repo, f) | |
823 | self.ui.note( |
|
822 | self.ui.note( | |
824 | _(b'saving current version of %s as %s\n') |
|
823 | _(b'saving current version of %s as %s\n') | |
825 | % (f, os.path.relpath(absorig)) |
|
824 | % (f, os.path.relpath(absorig)) | |
826 | ) |
|
825 | ) | |
827 |
|
826 | |||
828 | if copy: |
|
827 | if copy: | |
829 | util.copyfile(absf, absorig) |
|
828 | util.copyfile(absf, absorig) | |
830 | else: |
|
829 | else: | |
831 | util.rename(absf, absorig) |
|
830 | util.rename(absf, absorig) | |
832 |
|
831 | |||
833 | def printdiff( |
|
832 | def printdiff( | |
834 | self, |
|
833 | self, | |
835 | repo, |
|
834 | repo, | |
836 | diffopts, |
|
835 | diffopts, | |
837 | node1, |
|
836 | node1, | |
838 | node2=None, |
|
837 | node2=None, | |
839 | files=None, |
|
838 | files=None, | |
840 | fp=None, |
|
839 | fp=None, | |
841 | changes=None, |
|
840 | changes=None, | |
842 | opts=None, |
|
841 | opts=None, | |
843 | ): |
|
842 | ): | |
844 | if opts is None: |
|
843 | if opts is None: | |
845 | opts = {} |
|
844 | opts = {} | |
846 | stat = opts.get(b'stat') |
|
845 | stat = opts.get(b'stat') | |
847 | m = scmutil.match(repo[node1], files, opts) |
|
846 | m = scmutil.match(repo[node1], files, opts) | |
848 | logcmdutil.diffordiffstat( |
|
847 | logcmdutil.diffordiffstat( | |
849 | self.ui, |
|
848 | self.ui, | |
850 | repo, |
|
849 | repo, | |
851 | diffopts, |
|
850 | diffopts, | |
852 | repo[node1], |
|
851 | repo[node1], | |
853 | repo[node2], |
|
852 | repo[node2], | |
854 | m, |
|
853 | m, | |
855 | changes, |
|
854 | changes, | |
856 | stat, |
|
855 | stat, | |
857 | fp, |
|
856 | fp, | |
858 | ) |
|
857 | ) | |
859 |
|
858 | |||
860 | def mergeone(self, repo, mergeq, head, patch, rev, diffopts): |
|
859 | def mergeone(self, repo, mergeq, head, patch, rev, diffopts): | |
861 | # first try just applying the patch |
|
860 | # first try just applying the patch | |
862 | (err, n) = self.apply( |
|
861 | (err, n) = self.apply( | |
863 | repo, [patch], update_status=False, strict=True, merge=rev |
|
862 | repo, [patch], update_status=False, strict=True, merge=rev | |
864 | ) |
|
863 | ) | |
865 |
|
864 | |||
866 | if err == 0: |
|
865 | if err == 0: | |
867 | return (err, n) |
|
866 | return (err, n) | |
868 |
|
867 | |||
869 | if n is None: |
|
868 | if n is None: | |
870 | raise error.Abort(_(b"apply failed for patch %s") % patch) |
|
869 | raise error.Abort(_(b"apply failed for patch %s") % patch) | |
871 |
|
870 | |||
872 | self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch) |
|
871 | self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch) | |
873 |
|
872 | |||
874 | # apply failed, strip away that rev and merge. |
|
873 | # apply failed, strip away that rev and merge. | |
875 | hg.clean(repo, head) |
|
874 | hg.clean(repo, head) | |
876 | strip(self.ui, repo, [n], update=False, backup=False) |
|
875 | strip(self.ui, repo, [n], update=False, backup=False) | |
877 |
|
876 | |||
878 | ctx = repo[rev] |
|
877 | ctx = repo[rev] | |
879 | ret = hg.merge(ctx, remind=False) |
|
878 | ret = hg.merge(ctx, remind=False) | |
880 | if ret: |
|
879 | if ret: | |
881 | raise error.Abort(_(b"update returned %d") % ret) |
|
880 | raise error.Abort(_(b"update returned %d") % ret) | |
882 | n = newcommit(repo, None, ctx.description(), ctx.user(), force=True) |
|
881 | n = newcommit(repo, None, ctx.description(), ctx.user(), force=True) | |
883 | if n is None: |
|
882 | if n is None: | |
884 | raise error.Abort(_(b"repo commit failed")) |
|
883 | raise error.Abort(_(b"repo commit failed")) | |
885 | try: |
|
884 | try: | |
886 | ph = patchheader(mergeq.join(patch), self.plainmode) |
|
885 | ph = patchheader(mergeq.join(patch), self.plainmode) | |
887 | except Exception: |
|
886 | except Exception: | |
888 | raise error.Abort(_(b"unable to read %s") % patch) |
|
887 | raise error.Abort(_(b"unable to read %s") % patch) | |
889 |
|
888 | |||
890 | diffopts = self.patchopts(diffopts, patch) |
|
889 | diffopts = self.patchopts(diffopts, patch) | |
891 | patchf = self.opener(patch, b"w") |
|
890 | patchf = self.opener(patch, b"w") | |
892 | comments = bytes(ph) |
|
891 | comments = bytes(ph) | |
893 | if comments: |
|
892 | if comments: | |
894 | patchf.write(comments) |
|
893 | patchf.write(comments) | |
895 | self.printdiff(repo, diffopts, head, n, fp=patchf) |
|
894 | self.printdiff(repo, diffopts, head, n, fp=patchf) | |
896 | patchf.close() |
|
895 | patchf.close() | |
897 | self.removeundo(repo) |
|
896 | self.removeundo(repo) | |
898 | return (0, n) |
|
897 | return (0, n) | |
899 |
|
898 | |||
900 | def qparents(self, repo, rev=None): |
|
899 | def qparents(self, repo, rev=None): | |
901 | """return the mq handled parent or p1 |
|
900 | """return the mq handled parent or p1 | |
902 |
|
901 | |||
903 | In some case where mq get himself in being the parent of a merge the |
|
902 | In some case where mq get himself in being the parent of a merge the | |
904 | appropriate parent may be p2. |
|
903 | appropriate parent may be p2. | |
905 | (eg: an in progress merge started with mq disabled) |
|
904 | (eg: an in progress merge started with mq disabled) | |
906 |
|
905 | |||
907 | If no parent are managed by mq, p1 is returned. |
|
906 | If no parent are managed by mq, p1 is returned. | |
908 | """ |
|
907 | """ | |
909 | if rev is None: |
|
908 | if rev is None: | |
910 | (p1, p2) = repo.dirstate.parents() |
|
909 | (p1, p2) = repo.dirstate.parents() | |
911 | if p2 == nullid: |
|
910 | if p2 == repo.nullid: | |
912 | return p1 |
|
911 | return p1 | |
913 | if not self.applied: |
|
912 | if not self.applied: | |
914 | return None |
|
913 | return None | |
915 | return self.applied[-1].node |
|
914 | return self.applied[-1].node | |
916 | p1, p2 = repo.changelog.parents(rev) |
|
915 | p1, p2 = repo.changelog.parents(rev) | |
917 | if p2 != nullid and p2 in [x.node for x in self.applied]: |
|
916 | if p2 != repo.nullid and p2 in [x.node for x in self.applied]: | |
918 | return p2 |
|
917 | return p2 | |
919 | return p1 |
|
918 | return p1 | |
920 |
|
919 | |||
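The rule the qparents() docstring describes reduces to: a non-null p2 only wins when it is itself an applied mq changeset. A minimal standalone restatement of that rule (NULLID and the node values are hypothetical stand-ins for the real changelog data):

# Hypothetical stand-in for the repository's null node id.
NULLID = b"\0" * 20

def pick_mq_parent(p1, p2, applied_nodes):
    # Prefer p2 only when it is a real (non-null) parent managed by mq.
    if p2 != NULLID and p2 in applied_nodes:
        return p2
    return p1

assert pick_mq_parent(b"a" * 20, NULLID, []) == b"a" * 20
assert pick_mq_parent(b"a" * 20, b"b" * 20, [b"b" * 20]) == b"b" * 20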
921 | def mergepatch(self, repo, mergeq, series, diffopts): |
|
920 | def mergepatch(self, repo, mergeq, series, diffopts): | |
922 | if not self.applied: |
|
921 | if not self.applied: | |
923 | # each of the patches merged in will have two parents. This |
|
922 | # each of the patches merged in will have two parents. This | |
924 | # can confuse the qrefresh, qdiff, and strip code because it |
|
923 | # can confuse the qrefresh, qdiff, and strip code because it | |
925 | # needs to know which parent is actually in the patch queue. |
|
924 | # needs to know which parent is actually in the patch queue. | |
926 | # So, we insert a merge marker with only one parent. This way |
|
925 | # So, we insert a merge marker with only one parent. This way | |
927 | # the first patch in the queue is never a merge patch |
|
926 | # the first patch in the queue is never a merge patch | |
928 | # |
|
927 | # | |
929 | pname = b".hg.patches.merge.marker" |
|
928 | pname = b".hg.patches.merge.marker" | |
930 | n = newcommit(repo, None, b'[mq]: merge marker', force=True) |
|
929 | n = newcommit(repo, None, b'[mq]: merge marker', force=True) | |
931 | self.removeundo(repo) |
|
930 | self.removeundo(repo) | |
932 | self.applied.append(statusentry(n, pname)) |
|
931 | self.applied.append(statusentry(n, pname)) | |
933 | self.applieddirty = True |
|
932 | self.applieddirty = True | |
934 |
|
933 | |||
935 | head = self.qparents(repo) |
|
934 | head = self.qparents(repo) | |
936 |
|
935 | |||
937 | for patch in series: |
|
936 | for patch in series: | |
938 | patch = mergeq.lookup(patch, strict=True) |
|
937 | patch = mergeq.lookup(patch, strict=True) | |
939 | if not patch: |
|
938 | if not patch: | |
940 | self.ui.warn(_(b"patch %s does not exist\n") % patch) |
|
939 | self.ui.warn(_(b"patch %s does not exist\n") % patch) | |
941 | return (1, None) |
|
940 | return (1, None) | |
942 | pushable, reason = self.pushable(patch) |
|
941 | pushable, reason = self.pushable(patch) | |
943 | if not pushable: |
|
942 | if not pushable: | |
944 | self.explainpushable(patch, all_patches=True) |
|
943 | self.explainpushable(patch, all_patches=True) | |
945 | continue |
|
944 | continue | |
946 | info = mergeq.isapplied(patch) |
|
945 | info = mergeq.isapplied(patch) | |
947 | if not info: |
|
946 | if not info: | |
948 | self.ui.warn(_(b"patch %s is not applied\n") % patch) |
|
947 | self.ui.warn(_(b"patch %s is not applied\n") % patch) | |
949 | return (1, None) |
|
948 | return (1, None) | |
950 | rev = info[1] |
|
949 | rev = info[1] | |
951 | err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts) |
|
950 | err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts) | |
952 | if head: |
|
951 | if head: | |
953 | self.applied.append(statusentry(head, patch)) |
|
952 | self.applied.append(statusentry(head, patch)) | |
954 | self.applieddirty = True |
|
953 | self.applieddirty = True | |
955 | if err: |
|
954 | if err: | |
956 | return (err, head) |
|
955 | return (err, head) | |
957 | self.savedirty() |
|
956 | self.savedirty() | |
958 | return (0, head) |
|
957 | return (0, head) | |
959 |
|
958 | |||
960 | def patch(self, repo, patchfile): |
|
959 | def patch(self, repo, patchfile): | |
961 | """Apply patchfile to the working directory. |
|
960 | """Apply patchfile to the working directory. | |
962 | patchfile: name of patch file""" |
|
961 | patchfile: name of patch file""" | |
963 | files = set() |
|
962 | files = set() | |
964 | try: |
|
963 | try: | |
965 | fuzz = patchmod.patch( |
|
964 | fuzz = patchmod.patch( | |
966 | self.ui, repo, patchfile, strip=1, files=files, eolmode=None |
|
965 | self.ui, repo, patchfile, strip=1, files=files, eolmode=None | |
967 | ) |
|
966 | ) | |
968 | return (True, list(files), fuzz) |
|
967 | return (True, list(files), fuzz) | |
969 | except Exception as inst: |
|
968 | except Exception as inst: | |
970 | self.ui.note(stringutil.forcebytestr(inst) + b'\n') |
|
969 | self.ui.note(stringutil.forcebytestr(inst) + b'\n') | |
971 | if not self.ui.verbose: |
|
970 | if not self.ui.verbose: | |
972 | self.ui.warn(_(b"patch failed, unable to continue (try -v)\n")) |
|
971 | self.ui.warn(_(b"patch failed, unable to continue (try -v)\n")) | |
973 | self.ui.traceback() |
|
972 | self.ui.traceback() | |
974 | return (False, list(files), False) |
|
973 | return (False, list(files), False) | |
975 |
|
974 | |||
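patch() reports its outcome as a (success, files, fuzz) tuple rather than raising. A hypothetical caller-side sketch of how the three fields combine:

# Hypothetical consumer of patch()'s (success, files, fuzz) return value.
def report_patch_result(success, files, fuzz):
    if not success:
        return "patch failed; %d file(s) touched before failure" % len(files)
    if fuzz:
        return "applied with fuzz to %d file(s)" % len(files)
    return "applied cleanly to %d file(s)" % len(files)

assert report_patch_result(True, ['a', 'b'], 0) == "applied cleanly to 2 file(s)"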
976 | def apply( |
|
975 | def apply( | |
977 | self, |
|
976 | self, | |
978 | repo, |
|
977 | repo, | |
979 | series, |
|
978 | series, | |
980 | list=False, |
|
979 | list=False, | |
981 | update_status=True, |
|
980 | update_status=True, | |
982 | strict=False, |
|
981 | strict=False, | |
983 | patchdir=None, |
|
982 | patchdir=None, | |
984 | merge=None, |
|
983 | merge=None, | |
985 | all_files=None, |
|
984 | all_files=None, | |
986 | tobackup=None, |
|
985 | tobackup=None, | |
987 | keepchanges=False, |
|
986 | keepchanges=False, | |
988 | ): |
|
987 | ): | |
989 | wlock = lock = tr = None |
|
988 | wlock = lock = tr = None | |
990 | try: |
|
989 | try: | |
991 | wlock = repo.wlock() |
|
990 | wlock = repo.wlock() | |
992 | lock = repo.lock() |
|
991 | lock = repo.lock() | |
993 | tr = repo.transaction(b"qpush") |
|
992 | tr = repo.transaction(b"qpush") | |
994 | try: |
|
993 | try: | |
995 | ret = self._apply( |
|
994 | ret = self._apply( | |
996 | repo, |
|
995 | repo, | |
997 | series, |
|
996 | series, | |
998 | list, |
|
997 | list, | |
999 | update_status, |
|
998 | update_status, | |
1000 | strict, |
|
999 | strict, | |
1001 | patchdir, |
|
1000 | patchdir, | |
1002 | merge, |
|
1001 | merge, | |
1003 | all_files=all_files, |
|
1002 | all_files=all_files, | |
1004 | tobackup=tobackup, |
|
1003 | tobackup=tobackup, | |
1005 | keepchanges=keepchanges, |
|
1004 | keepchanges=keepchanges, | |
1006 | ) |
|
1005 | ) | |
1007 | tr.close() |
|
1006 | tr.close() | |
1008 | self.savedirty() |
|
1007 | self.savedirty() | |
1009 | return ret |
|
1008 | return ret | |
1010 | except AbortNoCleanup: |
|
1009 | except AbortNoCleanup: | |
1011 | tr.close() |
|
1010 | tr.close() | |
1012 | self.savedirty() |
|
1011 | self.savedirty() | |
1013 | raise |
|
1012 | raise | |
1014 | except: # re-raises |
|
1013 | except: # re-raises | |
1015 | try: |
|
1014 | try: | |
1016 | tr.abort() |
|
1015 | tr.abort() | |
1017 | finally: |
|
1016 | finally: | |
1018 | self.invalidate() |
|
1017 | self.invalidate() | |
1019 | raise |
|
1018 | raise | |
1020 | finally: |
|
1019 | finally: | |
1021 | release(tr, lock, wlock) |
|
1020 | release(tr, lock, wlock) | |
1022 | self.removeundo(repo) |
|
1021 | self.removeundo(repo) | |
1023 |
|
1022 | |||
1024 | def _apply( |
|
1023 | def _apply( | |
1025 | self, |
|
1024 | self, | |
1026 | repo, |
|
1025 | repo, | |
1027 | series, |
|
1026 | series, | |
1028 | list=False, |
|
1027 | list=False, | |
1029 | update_status=True, |
|
1028 | update_status=True, | |
1030 | strict=False, |
|
1029 | strict=False, | |
1031 | patchdir=None, |
|
1030 | patchdir=None, | |
1032 | merge=None, |
|
1031 | merge=None, | |
1033 | all_files=None, |
|
1032 | all_files=None, | |
1034 | tobackup=None, |
|
1033 | tobackup=None, | |
1035 | keepchanges=False, |
|
1034 | keepchanges=False, | |
1036 | ): |
|
1035 | ): | |
1037 | """returns (error, hash) |
|
1036 | """returns (error, hash) | |
1038 |
|
1037 | |||
1039 | error = 1 for unable to read, 2 for patch failed, 3 for patch |
|
1038 | error = 1 for unable to read, 2 for patch failed, 3 for patch | |
1040 | fuzz. tobackup is None or a set of files to back up before they |
|
1039 | fuzz. tobackup is None or a set of files to back up before they | |
1041 | are modified by a patch. |
|
1040 | are modified by a patch. | |
1042 | """ |
|
1041 | """ | |
1043 | # TODO unify with commands.py |
|
1042 | # TODO unify with commands.py | |
1044 | if not patchdir: |
|
1043 | if not patchdir: | |
1045 | patchdir = self.path |
|
1044 | patchdir = self.path | |
1046 | err = 0 |
|
1045 | err = 0 | |
1047 | n = None |
|
1046 | n = None | |
1048 | for patchname in series: |
|
1047 | for patchname in series: | |
1049 | pushable, reason = self.pushable(patchname) |
|
1048 | pushable, reason = self.pushable(patchname) | |
1050 | if not pushable: |
|
1049 | if not pushable: | |
1051 | self.explainpushable(patchname, all_patches=True) |
|
1050 | self.explainpushable(patchname, all_patches=True) | |
1052 | continue |
|
1051 | continue | |
1053 | self.ui.status(_(b"applying %s\n") % patchname) |
|
1052 | self.ui.status(_(b"applying %s\n") % patchname) | |
1054 | pf = os.path.join(patchdir, patchname) |
|
1053 | pf = os.path.join(patchdir, patchname) | |
1055 |
|
1054 | |||
1056 | try: |
|
1055 | try: | |
1057 | ph = patchheader(self.join(patchname), self.plainmode) |
|
1056 | ph = patchheader(self.join(patchname), self.plainmode) | |
1058 | except IOError: |
|
1057 | except IOError: | |
1059 | self.ui.warn(_(b"unable to read %s\n") % patchname) |
|
1058 | self.ui.warn(_(b"unable to read %s\n") % patchname) | |
1060 | err = 1 |
|
1059 | err = 1 | |
1061 | break |
|
1060 | break | |
1062 |
|
1061 | |||
1063 | message = ph.message |
|
1062 | message = ph.message | |
1064 | if not message: |
|
1063 | if not message: | |
1065 | # The commit message should not be translated |
|
1064 | # The commit message should not be translated | |
1066 | message = b"imported patch %s\n" % patchname |
|
1065 | message = b"imported patch %s\n" % patchname | |
1067 | else: |
|
1066 | else: | |
1068 | if list: |
|
1067 | if list: | |
1069 | # The commit message should not be translated |
|
1068 | # The commit message should not be translated | |
1070 | message.append(b"\nimported patch %s" % patchname) |
|
1069 | message.append(b"\nimported patch %s" % patchname) | |
1071 | message = b'\n'.join(message) |
|
1070 | message = b'\n'.join(message) | |
1072 |
|
1071 | |||
1073 | if ph.haspatch: |
|
1072 | if ph.haspatch: | |
1074 | if tobackup: |
|
1073 | if tobackup: | |
1075 | touched = patchmod.changedfiles(self.ui, repo, pf) |
|
1074 | touched = patchmod.changedfiles(self.ui, repo, pf) | |
1076 | touched = set(touched) & tobackup |
|
1075 | touched = set(touched) & tobackup | |
1077 | if touched and keepchanges: |
|
1076 | if touched and keepchanges: | |
1078 | raise AbortNoCleanup( |
|
1077 | raise AbortNoCleanup( | |
1079 | _(b"conflicting local changes found"), |
|
1078 | _(b"conflicting local changes found"), | |
1080 | hint=_(b"did you forget to qrefresh?"), |
|
1079 | hint=_(b"did you forget to qrefresh?"), | |
1081 | ) |
|
1080 | ) | |
1082 | self.backup(repo, touched, copy=True) |
|
1081 | self.backup(repo, touched, copy=True) | |
1083 | tobackup = tobackup - touched |
|
1082 | tobackup = tobackup - touched | |
1084 | (patcherr, files, fuzz) = self.patch(repo, pf) |
|
1083 | (patcherr, files, fuzz) = self.patch(repo, pf) | |
1085 | if all_files is not None: |
|
1084 | if all_files is not None: | |
1086 | all_files.update(files) |
|
1085 | all_files.update(files) | |
1087 | patcherr = not patcherr |
|
1086 | patcherr = not patcherr | |
1088 | else: |
|
1087 | else: | |
1089 | self.ui.warn(_(b"patch %s is empty\n") % patchname) |
|
1088 | self.ui.warn(_(b"patch %s is empty\n") % patchname) | |
1090 | patcherr, files, fuzz = 0, [], 0 |
|
1089 | patcherr, files, fuzz = 0, [], 0 | |
1091 |
|
1090 | |||
1092 | if merge and files: |
|
1091 | if merge and files: | |
1093 | # Mark as removed/merged and update dirstate parent info |
|
1092 | # Mark as removed/merged and update dirstate parent info | |
1094 | removed = [] |
|
1093 | removed = [] | |
1095 | merged = [] |
|
1094 | merged = [] | |
1096 | for f in files: |
|
1095 | for f in files: | |
1097 | if os.path.lexists(repo.wjoin(f)): |
|
1096 | if os.path.lexists(repo.wjoin(f)): | |
1098 | merged.append(f) |
|
1097 | merged.append(f) | |
1099 | else: |
|
1098 | else: | |
1100 | removed.append(f) |
|
1099 | removed.append(f) | |
1101 | with repo.dirstate.parentchange(): |
|
1100 | with repo.dirstate.parentchange(): | |
1102 | for f in removed: |
|
1101 | for f in removed: | |
1103 | repo.dirstate.remove(f) |
|
1102 | repo.dirstate.remove(f) | |
1104 | for f in merged: |
|
1103 | for f in merged: | |
1105 | repo.dirstate.merge(f) |
|
1104 | repo.dirstate.merge(f) | |
1106 | p1 = repo.dirstate.p1() |
|
1105 | p1 = repo.dirstate.p1() | |
1107 | repo.setparents(p1, merge) |
|
1106 | repo.setparents(p1, merge) | |
1108 |
|
1107 | |||
1109 | if all_files and b'.hgsubstate' in all_files: |
|
1108 | if all_files and b'.hgsubstate' in all_files: | |
1110 | wctx = repo[None] |
|
1109 | wctx = repo[None] | |
1111 | pctx = repo[b'.'] |
|
1110 | pctx = repo[b'.'] | |
1112 | overwrite = False |
|
1111 | overwrite = False | |
1113 | mergedsubstate = subrepoutil.submerge( |
|
1112 | mergedsubstate = subrepoutil.submerge( | |
1114 | repo, pctx, wctx, wctx, overwrite |
|
1113 | repo, pctx, wctx, wctx, overwrite | |
1115 | ) |
|
1114 | ) | |
1116 | files += mergedsubstate.keys() |
|
1115 | files += mergedsubstate.keys() | |
1117 |
|
1116 | |||
1118 | match = scmutil.matchfiles(repo, files or []) |
|
1117 | match = scmutil.matchfiles(repo, files or []) | |
1119 | oldtip = repo.changelog.tip() |
|
1118 | oldtip = repo.changelog.tip() | |
1120 | n = newcommit( |
|
1119 | n = newcommit( | |
1121 | repo, None, message, ph.user, ph.date, match=match, force=True |
|
1120 | repo, None, message, ph.user, ph.date, match=match, force=True | |
1122 | ) |
|
1121 | ) | |
1123 | if repo.changelog.tip() == oldtip: |
|
1122 | if repo.changelog.tip() == oldtip: | |
1124 | raise error.Abort( |
|
1123 | raise error.Abort( | |
1125 | _(b"qpush exactly duplicates child changeset") |
|
1124 | _(b"qpush exactly duplicates child changeset") | |
1126 | ) |
|
1125 | ) | |
1127 | if n is None: |
|
1126 | if n is None: | |
1128 | raise error.Abort(_(b"repository commit failed")) |
|
1127 | raise error.Abort(_(b"repository commit failed")) | |
1129 |
|
1128 | |||
1130 | if update_status: |
|
1129 | if update_status: | |
1131 | self.applied.append(statusentry(n, patchname)) |
|
1130 | self.applied.append(statusentry(n, patchname)) | |
1132 |
|
1131 | |||
1133 | if patcherr: |
|
1132 | if patcherr: | |
1134 | self.ui.warn( |
|
1133 | self.ui.warn( | |
1135 | _(b"patch failed, rejects left in working directory\n") |
|
1134 | _(b"patch failed, rejects left in working directory\n") | |
1136 | ) |
|
1135 | ) | |
1137 | err = 2 |
|
1136 | err = 2 | |
1138 | break |
|
1137 | break | |
1139 |
|
1138 | |||
1140 | if fuzz and strict: |
|
1139 | if fuzz and strict: | |
1141 | self.ui.warn(_(b"fuzz found when applying patch, stopping\n")) |
|
1140 | self.ui.warn(_(b"fuzz found when applying patch, stopping\n")) | |
1142 | err = 3 |
|
1141 | err = 3 | |
1143 | break |
|
1142 | break | |
1144 | return (err, n) |
|
1143 | return (err, n) | |
1145 |
|
1144 | |||
1146 | def _cleanup(self, patches, numrevs, keep=False): |
|
1145 | def _cleanup(self, patches, numrevs, keep=False): | |
1147 | if not keep: |
|
1146 | if not keep: | |
1148 | r = self.qrepo() |
|
1147 | r = self.qrepo() | |
1149 | if r: |
|
1148 | if r: | |
1150 | r[None].forget(patches) |
|
1149 | r[None].forget(patches) | |
1151 | for p in patches: |
|
1150 | for p in patches: | |
1152 | try: |
|
1151 | try: | |
1153 | os.unlink(self.join(p)) |
|
1152 | os.unlink(self.join(p)) | |
1154 | except OSError as inst: |
|
1153 | except OSError as inst: | |
1155 | if inst.errno != errno.ENOENT: |
|
1154 | if inst.errno != errno.ENOENT: | |
1156 | raise |
|
1155 | raise | |
1157 |
|
1156 | |||
1158 | qfinished = [] |
|
1157 | qfinished = [] | |
1159 | if numrevs: |
|
1158 | if numrevs: | |
1160 | qfinished = self.applied[:numrevs] |
|
1159 | qfinished = self.applied[:numrevs] | |
1161 | del self.applied[:numrevs] |
|
1160 | del self.applied[:numrevs] | |
1162 | self.applieddirty = True |
|
1161 | self.applieddirty = True | |
1163 |
|
1162 | |||
1164 | unknown = [] |
|
1163 | unknown = [] | |
1165 |
|
1164 | |||
1166 | sortedseries = [] |
|
1165 | sortedseries = [] | |
1167 | for p in patches: |
|
1166 | for p in patches: | |
1168 | idx = self.findseries(p) |
|
1167 | idx = self.findseries(p) | |
1169 | if idx is None: |
|
1168 | if idx is None: | |
1170 | sortedseries.append((-1, p)) |
|
1169 | sortedseries.append((-1, p)) | |
1171 | else: |
|
1170 | else: | |
1172 | sortedseries.append((idx, p)) |
|
1171 | sortedseries.append((idx, p)) | |
1173 |
|
1172 | |||
1174 | sortedseries.sort(reverse=True) |
|
1173 | sortedseries.sort(reverse=True) | |
1175 | for (i, p) in sortedseries: |
|
1174 | for (i, p) in sortedseries: | |
1176 | if i != -1: |
|
1175 | if i != -1: | |
1177 | del self.fullseries[i] |
|
1176 | del self.fullseries[i] | |
1178 | else: |
|
1177 | else: | |
1179 | unknown.append(p) |
|
1178 | unknown.append(p) | |
1180 |
|
1179 | |||
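The reverse sort above exists so that entries can be deleted from self.fullseries by index without invalidating the indexes still to be processed; the pattern in isolation:

# Deleting in descending index order keeps lower indexes stable.
def delete_indexes(lst, indexes):
    for i in sorted(indexes, reverse=True):
        del lst[i]
    return lst

assert delete_indexes(['a', 'b', 'c', 'd'], [0, 2]) == ['b', 'd']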
1181 | if unknown: |
|
1180 | if unknown: | |
1182 | if numrevs: |
|
1181 | if numrevs: | |
1183 | rev = {entry.name: entry.node for entry in qfinished} |
|
1182 | rev = {entry.name: entry.node for entry in qfinished} | |
1184 | for p in unknown: |
|
1183 | for p in unknown: | |
1185 | msg = _(b'revision %s refers to unknown patches: %s\n') |
|
1184 | msg = _(b'revision %s refers to unknown patches: %s\n') | |
1186 | self.ui.warn(msg % (short(rev[p]), p)) |
|
1185 | self.ui.warn(msg % (short(rev[p]), p)) | |
1187 | else: |
|
1186 | else: | |
1188 | msg = _(b'unknown patches: %s\n') |
|
1187 | msg = _(b'unknown patches: %s\n') | |
1189 | raise error.Abort(b''.join(msg % p for p in unknown)) |
|
1188 | raise error.Abort(b''.join(msg % p for p in unknown)) | |
1190 |
|
1189 | |||
1191 | self.parseseries() |
|
1190 | self.parseseries() | |
1192 | self.seriesdirty = True |
|
1191 | self.seriesdirty = True | |
1193 | return [entry.node for entry in qfinished] |
|
1192 | return [entry.node for entry in qfinished] | |
1194 |
|
1193 | |||
1195 | def _revpatches(self, repo, revs): |
|
1194 | def _revpatches(self, repo, revs): | |
1196 | firstrev = repo[self.applied[0].node].rev() |
|
1195 | firstrev = repo[self.applied[0].node].rev() | |
1197 | patches = [] |
|
1196 | patches = [] | |
1198 | for i, rev in enumerate(revs): |
|
1197 | for i, rev in enumerate(revs): | |
1199 |
|
1198 | |||
1200 | if rev < firstrev: |
|
1199 | if rev < firstrev: | |
1201 | raise error.Abort(_(b'revision %d is not managed') % rev) |
|
1200 | raise error.Abort(_(b'revision %d is not managed') % rev) | |
1202 |
|
1201 | |||
1203 | ctx = repo[rev] |
|
1202 | ctx = repo[rev] | |
1204 | base = self.applied[i].node |
|
1203 | base = self.applied[i].node | |
1205 | if ctx.node() != base: |
|
1204 | if ctx.node() != base: | |
1206 | msg = _(b'cannot delete revision %d above applied patches') |
|
1205 | msg = _(b'cannot delete revision %d above applied patches') | |
1207 | raise error.Abort(msg % rev) |
|
1206 | raise error.Abort(msg % rev) | |
1208 |
|
1207 | |||
1209 | patch = self.applied[i].name |
|
1208 | patch = self.applied[i].name | |
1210 | for fmt in (b'[mq]: %s', b'imported patch %s'): |
|
1209 | for fmt in (b'[mq]: %s', b'imported patch %s'): | |
1211 | if ctx.description() == fmt % patch: |
|
1210 | if ctx.description() == fmt % patch: | |
1212 | msg = _(b'patch %s finalized without changeset message\n') |
|
1211 | msg = _(b'patch %s finalized without changeset message\n') | |
1213 | repo.ui.status(msg % patch) |
|
1212 | repo.ui.status(msg % patch) | |
1214 | break |
|
1213 | break | |
1215 |
|
1214 | |||
1216 | patches.append(patch) |
|
1215 | patches.append(patch) | |
1217 | return patches |
|
1216 | return patches | |
1218 |
|
1217 | |||
1219 | def finish(self, repo, revs): |
|
1218 | def finish(self, repo, revs): | |
1220 | # Manually trigger phase computation to ensure phasedefaults is |
|
1219 | # Manually trigger phase computation to ensure phasedefaults is | |
1221 | # executed before we remove the patches. |
|
1220 | # executed before we remove the patches. | |
1222 | repo._phasecache |
|
1221 | repo._phasecache | |
1223 | patches = self._revpatches(repo, sorted(revs)) |
|
1222 | patches = self._revpatches(repo, sorted(revs)) | |
1224 | qfinished = self._cleanup(patches, len(patches)) |
|
1223 | qfinished = self._cleanup(patches, len(patches)) | |
1225 | if qfinished and repo.ui.configbool(b'mq', b'secret'): |
|
1224 | if qfinished and repo.ui.configbool(b'mq', b'secret'): | |
1226 | # only use this logic when the secret option is added |
|
1225 | # only use this logic when the secret option is added | |
1227 | oldqbase = repo[qfinished[0]] |
|
1226 | oldqbase = repo[qfinished[0]] | |
1228 | tphase = phases.newcommitphase(repo.ui) |
|
1227 | tphase = phases.newcommitphase(repo.ui) | |
1229 | if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase: |
|
1228 | if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase: | |
1230 | with repo.transaction(b'qfinish') as tr: |
|
1229 | with repo.transaction(b'qfinish') as tr: | |
1231 | phases.advanceboundary(repo, tr, tphase, qfinished) |
|
1230 | phases.advanceboundary(repo, tr, tphase, qfinished) | |
1232 |
|
1231 | |||
1233 | def delete(self, repo, patches, opts): |
|
1232 | def delete(self, repo, patches, opts): | |
1234 | if not patches and not opts.get(b'rev'): |
|
1233 | if not patches and not opts.get(b'rev'): | |
1235 | raise error.Abort( |
|
1234 | raise error.Abort( | |
1236 | _(b'qdelete requires at least one revision or patch name') |
|
1235 | _(b'qdelete requires at least one revision or patch name') | |
1237 | ) |
|
1236 | ) | |
1238 |
|
1237 | |||
1239 | realpatches = [] |
|
1238 | realpatches = [] | |
1240 | for patch in patches: |
|
1239 | for patch in patches: | |
1241 | patch = self.lookup(patch, strict=True) |
|
1240 | patch = self.lookup(patch, strict=True) | |
1242 | info = self.isapplied(patch) |
|
1241 | info = self.isapplied(patch) | |
1243 | if info: |
|
1242 | if info: | |
1244 | raise error.Abort(_(b"cannot delete applied patch %s") % patch) |
|
1243 | raise error.Abort(_(b"cannot delete applied patch %s") % patch) | |
1245 | if patch not in self.series: |
|
1244 | if patch not in self.series: | |
1246 | raise error.Abort(_(b"patch %s not in series file") % patch) |
|
1245 | raise error.Abort(_(b"patch %s not in series file") % patch) | |
1247 | if patch not in realpatches: |
|
1246 | if patch not in realpatches: | |
1248 | realpatches.append(patch) |
|
1247 | realpatches.append(patch) | |
1249 |
|
1248 | |||
1250 | numrevs = 0 |
|
1249 | numrevs = 0 | |
1251 | if opts.get(b'rev'): |
|
1250 | if opts.get(b'rev'): | |
1252 | if not self.applied: |
|
1251 | if not self.applied: | |
1253 | raise error.Abort(_(b'no patches applied')) |
|
1252 | raise error.Abort(_(b'no patches applied')) | |
1254 | revs = scmutil.revrange(repo, opts.get(b'rev')) |
|
1253 | revs = scmutil.revrange(repo, opts.get(b'rev')) | |
1255 | revs.sort() |
|
1254 | revs.sort() | |
1256 | revpatches = self._revpatches(repo, revs) |
|
1255 | revpatches = self._revpatches(repo, revs) | |
1257 | realpatches += revpatches |
|
1256 | realpatches += revpatches | |
1258 | numrevs = len(revpatches) |
|
1257 | numrevs = len(revpatches) | |
1259 |
|
1258 | |||
1260 | self._cleanup(realpatches, numrevs, opts.get(b'keep')) |
|
1259 | self._cleanup(realpatches, numrevs, opts.get(b'keep')) | |
1261 |
|
1260 | |||
1262 | def checktoppatch(self, repo): |
|
1261 | def checktoppatch(self, repo): | |
1263 | '''check that working directory is at qtip''' |
|
1262 | '''check that working directory is at qtip''' | |
1264 | if self.applied: |
|
1263 | if self.applied: | |
1265 | top = self.applied[-1].node |
|
1264 | top = self.applied[-1].node | |
1266 | patch = self.applied[-1].name |
|
1265 | patch = self.applied[-1].name | |
1267 | if repo.dirstate.p1() != top: |
|
1266 | if repo.dirstate.p1() != top: | |
1268 | raise error.Abort(_(b"working directory revision is not qtip")) |
|
1267 | raise error.Abort(_(b"working directory revision is not qtip")) | |
1269 | return top, patch |
|
1268 | return top, patch | |
1270 | return None, None |
|
1269 | return None, None | |
1271 |
|
1270 | |||
1272 | def putsubstate2changes(self, substatestate, changes): |
|
1271 | def putsubstate2changes(self, substatestate, changes): | |
1273 | if isinstance(changes, list): |
|
1272 | if isinstance(changes, list): | |
1274 | mar = changes[:3] |
|
1273 | mar = changes[:3] | |
1275 | else: |
|
1274 | else: | |
1276 | mar = (changes.modified, changes.added, changes.removed) |
|
1275 | mar = (changes.modified, changes.added, changes.removed) | |
1277 | if any((b'.hgsubstate' in files for files in mar)): |
|
1276 | if any((b'.hgsubstate' in files for files in mar)): | |
1278 | return # already listed up |
|
1277 | return # already listed up | |
1279 | # not yet listed up |
|
1278 | # not yet listed up | |
1280 | if substatestate in b'a?': |
|
1279 | if substatestate in b'a?': | |
1281 | mar[1].append(b'.hgsubstate') |
|
1280 | mar[1].append(b'.hgsubstate') | |
1282 | elif substatestate in b'r': |
|
1281 | elif substatestate in b'r': | |
1283 | mar[2].append(b'.hgsubstate') |
|
1282 | mar[2].append(b'.hgsubstate') | |
1284 | else: # modified |
|
1283 | else: # modified | |
1285 | mar[0].append(b'.hgsubstate') |
|
1284 | mar[0].append(b'.hgsubstate') | |
1286 |
|
1285 | |||
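The status-character dispatch above ('a'/'?' means added, 'r' removed, anything else modified) in standalone form, with plain lists standing in for the status object's fields:

def classify_substate(substatestate, modified, added, removed):
    # Mirrors putsubstate2changes(): route .hgsubstate into one of the
    # three status buckets based on its dirstate character.
    if substatestate in b'a?':
        added.append(b'.hgsubstate')
    elif substatestate in b'r':
        removed.append(b'.hgsubstate')
    else:
        modified.append(b'.hgsubstate')

m, a, r = [], [], []
classify_substate(b'a', m, a, r)
assert a == [b'.hgsubstate']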
1287 | def checklocalchanges(self, repo, force=False, refresh=True): |
|
1286 | def checklocalchanges(self, repo, force=False, refresh=True): | |
1288 | excsuffix = b'' |
|
1287 | excsuffix = b'' | |
1289 | if refresh: |
|
1288 | if refresh: | |
1290 | excsuffix = b', qrefresh first' |
|
1289 | excsuffix = b', qrefresh first' | |
1291 | # plain versions for i18n tool to detect them |
|
1290 | # plain versions for i18n tool to detect them | |
1292 | _(b"local changes found, qrefresh first") |
|
1291 | _(b"local changes found, qrefresh first") | |
1293 | _(b"local changed subrepos found, qrefresh first") |
|
1292 | _(b"local changed subrepos found, qrefresh first") | |
1294 |
|
1293 | |||
1295 | s = repo.status() |
|
1294 | s = repo.status() | |
1296 | if not force: |
|
1295 | if not force: | |
1297 | cmdutil.checkunfinished(repo) |
|
1296 | cmdutil.checkunfinished(repo) | |
1298 | if s.modified or s.added or s.removed or s.deleted: |
|
1297 | if s.modified or s.added or s.removed or s.deleted: | |
1299 | _(b"local changes found") # i18n tool detection |
|
1298 | _(b"local changes found") # i18n tool detection | |
1300 | raise error.Abort(_(b"local changes found" + excsuffix)) |
|
1299 | raise error.Abort(_(b"local changes found" + excsuffix)) | |
1301 | if checksubstate(repo): |
|
1300 | if checksubstate(repo): | |
1302 | _(b"local changed subrepos found") # i18n tool detection |
|
1301 | _(b"local changed subrepos found") # i18n tool detection | |
1303 | raise error.Abort( |
|
1302 | raise error.Abort( | |
1304 | _(b"local changed subrepos found" + excsuffix) |
|
1303 | _(b"local changed subrepos found" + excsuffix) | |
1305 | ) |
|
1304 | ) | |
1306 | else: |
|
1305 | else: | |
1307 | cmdutil.checkunfinished(repo, skipmerge=True) |
|
1306 | cmdutil.checkunfinished(repo, skipmerge=True) | |
1308 | return s |
|
1307 | return s | |
1309 |
|
1308 | |||
1310 | _reserved = (b'series', b'status', b'guards', b'.', b'..') |
|
1309 | _reserved = (b'series', b'status', b'guards', b'.', b'..') | |
1311 |
|
1310 | |||
1312 | def checkreservedname(self, name): |
|
1311 | def checkreservedname(self, name): | |
1313 | if name in self._reserved: |
|
1312 | if name in self._reserved: | |
1314 | raise error.Abort( |
|
1313 | raise error.Abort( | |
1315 | _(b'"%s" cannot be used as the name of a patch') % name |
|
1314 | _(b'"%s" cannot be used as the name of a patch') % name | |
1316 | ) |
|
1315 | ) | |
1317 | if name != name.strip(): |
|
1316 | if name != name.strip(): | |
1318 | # whitespace is stripped by parseseries() |
|
1317 | # whitespace is stripped by parseseries() | |
1319 | raise error.Abort( |
|
1318 | raise error.Abort( | |
1320 | _(b'patch name cannot begin or end with whitespace') |
|
1319 | _(b'patch name cannot begin or end with whitespace') | |
1321 | ) |
|
1320 | ) | |
1322 | for prefix in (b'.hg', b'.mq'): |
|
1321 | for prefix in (b'.hg', b'.mq'): | |
1323 | if name.startswith(prefix): |
|
1322 | if name.startswith(prefix): | |
1324 | raise error.Abort( |
|
1323 | raise error.Abort( | |
1325 | _(b'patch name cannot begin with "%s"') % prefix |
|
1324 | _(b'patch name cannot begin with "%s"') % prefix | |
1326 | ) |
|
1325 | ) | |
1327 | for c in (b'#', b':', b'\r', b'\n'): |
|
1326 | for c in (b'#', b':', b'\r', b'\n'): | |
1328 | if c in name: |
|
1327 | if c in name: | |
1329 | raise error.Abort( |
|
1328 | raise error.Abort( | |
1330 | _(b'%r cannot be used in the name of a patch') |
|
1329 | _(b'%r cannot be used in the name of a patch') | |
1331 | % pycompat.bytestr(c) |
|
1330 | % pycompat.bytestr(c) | |
1332 | ) |
|
1331 | ) | |
1333 |
|
1332 | |||
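Taken together, checkreservedname() enforces four naming rules. A boolean restatement of the same rule set (the rules are copied from the code above; the predicate itself is a hypothetical sketch):

RESERVED = (b'series', b'status', b'guards', b'.', b'..')

def is_valid_patch_name(name):
    if name in RESERVED:
        return False
    if name != name.strip():                 # leading/trailing whitespace
        return False
    if name.startswith((b'.hg', b'.mq')):    # reserved prefixes
        return False
    return not any(c in name for c in (b'#', b':', b'\r', b'\n'))

assert is_valid_patch_name(b'fix-bug')
assert not is_valid_patch_name(b'.hgignore-tweak')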
1334 | def checkpatchname(self, name, force=False): |
|
1333 | def checkpatchname(self, name, force=False): | |
1335 | self.checkreservedname(name) |
|
1334 | self.checkreservedname(name) | |
1336 | if not force and os.path.exists(self.join(name)): |
|
1335 | if not force and os.path.exists(self.join(name)): | |
1337 | if os.path.isdir(self.join(name)): |
|
1336 | if os.path.isdir(self.join(name)): | |
1338 | raise error.Abort( |
|
1337 | raise error.Abort( | |
1339 | _(b'"%s" already exists as a directory') % name |
|
1338 | _(b'"%s" already exists as a directory') % name | |
1340 | ) |
|
1339 | ) | |
1341 | else: |
|
1340 | else: | |
1342 | raise error.Abort(_(b'patch "%s" already exists') % name) |
|
1341 | raise error.Abort(_(b'patch "%s" already exists') % name) | |
1343 |
|
1342 | |||
1344 | def makepatchname(self, title, fallbackname): |
|
1343 | def makepatchname(self, title, fallbackname): | |
1345 | """Return a suitable filename for title, adding a suffix to make |
|
1344 | """Return a suitable filename for title, adding a suffix to make | |
1346 | it unique in the existing list""" |
|
1345 | it unique in the existing list""" | |
1347 | namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_') |
|
1346 | namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_') | |
1348 | namebase = namebase[:75] # avoid an overly long name (issue5117) |
|
1347 | namebase = namebase[:75] # avoid an overly long name (issue5117) | |
1349 | if namebase: |
|
1348 | if namebase: | |
1350 | try: |
|
1349 | try: | |
1351 | self.checkreservedname(namebase) |
|
1350 | self.checkreservedname(namebase) | |
1352 | except error.Abort: |
|
1351 | except error.Abort: | |
1353 | namebase = fallbackname |
|
1352 | namebase = fallbackname | |
1354 | else: |
|
1353 | else: | |
1355 | namebase = fallbackname |
|
1354 | namebase = fallbackname | |
1356 | name = namebase |
|
1355 | name = namebase | |
1357 | i = 0 |
|
1356 | i = 0 | |
1358 | while True: |
|
1357 | while True: | |
1359 | if name not in self.fullseries: |
|
1358 | if name not in self.fullseries: | |
1360 | try: |
|
1359 | try: | |
1361 | self.checkpatchname(name) |
|
1360 | self.checkpatchname(name) | |
1362 | break |
|
1361 | break | |
1363 | except error.Abort: |
|
1362 | except error.Abort: | |
1364 | pass |
|
1363 | pass | |
1365 | i += 1 |
|
1364 | i += 1 | |
1366 | name = b'%s__%d' % (namebase, i) |
|
1365 | name = b'%s__%d' % (namebase, i) | |
1367 | return name |
|
1366 | return name | |
1368 |
|
1367 | |||
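The interesting part of makepatchname() is the slug step: collapse runs of whitespace, punctuation, and underscores into single underscores, then clamp to 75 bytes (issue5117). That step in isolation:

import re

def slugify_title(title):
    # Same normalization as makepatchname(): runs of whitespace,
    # non-word characters, and underscores become one underscore.
    namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
    return namebase[:75]  # avoid an overly long name (issue5117)

assert slugify_title(b'Fix: crash on empty series!') == b'fix_crash_on_empty_series'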
1369 | def checkkeepchanges(self, keepchanges, force): |
|
1368 | def checkkeepchanges(self, keepchanges, force): | |
1370 | if force and keepchanges: |
|
1369 | if force and keepchanges: | |
1371 | raise error.Abort(_(b'cannot use both --force and --keep-changes')) |
|
1370 | raise error.Abort(_(b'cannot use both --force and --keep-changes')) | |
1372 |
|
1371 | |||
1373 | def new(self, repo, patchfn, *pats, **opts): |
|
1372 | def new(self, repo, patchfn, *pats, **opts): | |
1374 | """options: |
|
1373 | """options: | |
1375 | msg: a string or a no-argument function returning a string |
|
1374 | msg: a string or a no-argument function returning a string | |
1376 | """ |
|
1375 | """ | |
1377 | opts = pycompat.byteskwargs(opts) |
|
1376 | opts = pycompat.byteskwargs(opts) | |
1378 | msg = opts.get(b'msg') |
|
1377 | msg = opts.get(b'msg') | |
1379 | edit = opts.get(b'edit') |
|
1378 | edit = opts.get(b'edit') | |
1380 | editform = opts.get(b'editform', b'mq.qnew') |
|
1379 | editform = opts.get(b'editform', b'mq.qnew') | |
1381 | user = opts.get(b'user') |
|
1380 | user = opts.get(b'user') | |
1382 | date = opts.get(b'date') |
|
1381 | date = opts.get(b'date') | |
1383 | if date: |
|
1382 | if date: | |
1384 | date = dateutil.parsedate(date) |
|
1383 | date = dateutil.parsedate(date) | |
1385 | diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True) |
|
1384 | diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True) | |
1386 | if opts.get(b'checkname', True): |
|
1385 | if opts.get(b'checkname', True): | |
1387 | self.checkpatchname(patchfn) |
|
1386 | self.checkpatchname(patchfn) | |
1388 | inclsubs = checksubstate(repo) |
|
1387 | inclsubs = checksubstate(repo) | |
1389 | if inclsubs: |
|
1388 | if inclsubs: | |
1390 | substatestate = repo.dirstate[b'.hgsubstate'] |
|
1389 | substatestate = repo.dirstate[b'.hgsubstate'] | |
1391 | if opts.get(b'include') or opts.get(b'exclude') or pats: |
|
1390 | if opts.get(b'include') or opts.get(b'exclude') or pats: | |
1392 | # detect missing files in pats |
|
1391 | # detect missing files in pats | |
1393 | def badfn(f, msg): |
|
1392 | def badfn(f, msg): | |
1394 | if f != b'.hgsubstate': # .hgsubstate is auto-created |
|
1393 | if f != b'.hgsubstate': # .hgsubstate is auto-created | |
1395 | raise error.Abort(b'%s: %s' % (f, msg)) |
|
1394 | raise error.Abort(b'%s: %s' % (f, msg)) | |
1396 |
|
1395 | |||
1397 | match = scmutil.match(repo[None], pats, opts, badfn=badfn) |
|
1396 | match = scmutil.match(repo[None], pats, opts, badfn=badfn) | |
1398 | changes = repo.status(match=match) |
|
1397 | changes = repo.status(match=match) | |
1399 | else: |
|
1398 | else: | |
1400 | changes = self.checklocalchanges(repo, force=True) |
|
1399 | changes = self.checklocalchanges(repo, force=True) | |
1401 | commitfiles = list(inclsubs) |
|
1400 | commitfiles = list(inclsubs) | |
1402 | commitfiles.extend(changes.modified) |
|
1401 | commitfiles.extend(changes.modified) | |
1403 | commitfiles.extend(changes.added) |
|
1402 | commitfiles.extend(changes.added) | |
1404 | commitfiles.extend(changes.removed) |
|
1403 | commitfiles.extend(changes.removed) | |
1405 | match = scmutil.matchfiles(repo, commitfiles) |
|
1404 | match = scmutil.matchfiles(repo, commitfiles) | |
1406 | if len(repo[None].parents()) > 1: |
|
1405 | if len(repo[None].parents()) > 1: | |
1407 | raise error.Abort(_(b'cannot manage merge changesets')) |
|
1406 | raise error.Abort(_(b'cannot manage merge changesets')) | |
1408 | self.checktoppatch(repo) |
|
1407 | self.checktoppatch(repo) | |
1409 | insert = self.fullseriesend() |
|
1408 | insert = self.fullseriesend() | |
1410 | with repo.wlock(): |
|
1409 | with repo.wlock(): | |
1411 | try: |
|
1410 | try: | |
1412 | # if patch file write fails, abort early |
|
1411 | # if patch file write fails, abort early | |
1413 | p = self.opener(patchfn, b"w") |
|
1412 | p = self.opener(patchfn, b"w") | |
1414 | except IOError as e: |
|
1413 | except IOError as e: | |
1415 | raise error.Abort( |
|
1414 | raise error.Abort( | |
1416 | _(b'cannot write patch "%s": %s') |
|
1415 | _(b'cannot write patch "%s": %s') | |
1417 | % (patchfn, encoding.strtolocal(e.strerror)) |
|
1416 | % (patchfn, encoding.strtolocal(e.strerror)) | |
1418 | ) |
|
1417 | ) | |
1419 | try: |
|
1418 | try: | |
1420 | defaultmsg = b"[mq]: %s" % patchfn |
|
1419 | defaultmsg = b"[mq]: %s" % patchfn | |
1421 | editor = cmdutil.getcommiteditor(editform=editform) |
|
1420 | editor = cmdutil.getcommiteditor(editform=editform) | |
1422 | if edit: |
|
1421 | if edit: | |
1423 |
|
1422 | |||
1424 | def finishdesc(desc): |
|
1423 | def finishdesc(desc): | |
1425 | if desc.rstrip(): |
|
1424 | if desc.rstrip(): | |
1426 | return desc |
|
1425 | return desc | |
1427 | else: |
|
1426 | else: | |
1428 | return defaultmsg |
|
1427 | return defaultmsg | |
1429 |
|
1428 | |||
1430 | # i18n: this message is shown in editor with "HG: " prefix |
|
1429 | # i18n: this message is shown in editor with "HG: " prefix | |
1431 | extramsg = _(b'Leave message empty to use default message.') |
|
1430 | extramsg = _(b'Leave message empty to use default message.') | |
1432 | editor = cmdutil.getcommiteditor( |
|
1431 | editor = cmdutil.getcommiteditor( | |
1433 | finishdesc=finishdesc, |
|
1432 | finishdesc=finishdesc, | |
1434 | extramsg=extramsg, |
|
1433 | extramsg=extramsg, | |
1435 | editform=editform, |
|
1434 | editform=editform, | |
1436 | ) |
|
1435 | ) | |
1437 | commitmsg = msg |
|
1436 | commitmsg = msg | |
1438 | else: |
|
1437 | else: | |
1439 | commitmsg = msg or defaultmsg |
|
1438 | commitmsg = msg or defaultmsg | |
1440 |
|
1439 | |||
1441 | n = newcommit( |
|
1440 | n = newcommit( | |
1442 | repo, |
|
1441 | repo, | |
1443 | None, |
|
1442 | None, | |
1444 | commitmsg, |
|
1443 | commitmsg, | |
1445 | user, |
|
1444 | user, | |
1446 | date, |
|
1445 | date, | |
1447 | match=match, |
|
1446 | match=match, | |
1448 | force=True, |
|
1447 | force=True, | |
1449 | editor=editor, |
|
1448 | editor=editor, | |
1450 | ) |
|
1449 | ) | |
1451 | if n is None: |
|
1450 | if n is None: | |
1452 | raise error.Abort(_(b"repo commit failed")) |
|
1451 | raise error.Abort(_(b"repo commit failed")) | |
1453 | try: |
|
1452 | try: | |
1454 | self.fullseries[insert:insert] = [patchfn] |
|
1453 | self.fullseries[insert:insert] = [patchfn] | |
1455 | self.applied.append(statusentry(n, patchfn)) |
|
1454 | self.applied.append(statusentry(n, patchfn)) | |
1456 | self.parseseries() |
|
1455 | self.parseseries() | |
1457 | self.seriesdirty = True |
|
1456 | self.seriesdirty = True | |
1458 | self.applieddirty = True |
|
1457 | self.applieddirty = True | |
1459 | nctx = repo[n] |
|
1458 | nctx = repo[n] | |
1460 | ph = patchheader(self.join(patchfn), self.plainmode) |
|
1459 | ph = patchheader(self.join(patchfn), self.plainmode) | |
1461 | if user: |
|
1460 | if user: | |
1462 | ph.setuser(user) |
|
1461 | ph.setuser(user) | |
1463 | if date: |
|
1462 | if date: | |
1464 | ph.setdate(b'%d %d' % date) |
|
1463 | ph.setdate(b'%d %d' % date) | |
1465 | ph.setparent(hex(nctx.p1().node())) |
|
1464 | ph.setparent(hex(nctx.p1().node())) | |
1466 | msg = nctx.description().strip() |
|
1465 | msg = nctx.description().strip() | |
1467 | if msg == defaultmsg.strip(): |
|
1466 | if msg == defaultmsg.strip(): | |
1468 | msg = b'' |
|
1467 | msg = b'' | |
1469 | ph.setmessage(msg) |
|
1468 | ph.setmessage(msg) | |
1470 | p.write(bytes(ph)) |
|
1469 | p.write(bytes(ph)) | |
1471 | if commitfiles: |
|
1470 | if commitfiles: | |
1472 | parent = self.qparents(repo, n) |
|
1471 | parent = self.qparents(repo, n) | |
1473 | if inclsubs: |
|
1472 | if inclsubs: | |
1474 | self.putsubstate2changes(substatestate, changes) |
|
1473 | self.putsubstate2changes(substatestate, changes) | |
1475 | chunks = patchmod.diff( |
|
1474 | chunks = patchmod.diff( | |
1476 | repo, |
|
1475 | repo, | |
1477 | node1=parent, |
|
1476 | node1=parent, | |
1478 | node2=n, |
|
1477 | node2=n, | |
1479 | changes=changes, |
|
1478 | changes=changes, | |
1480 | opts=diffopts, |
|
1479 | opts=diffopts, | |
1481 | ) |
|
1480 | ) | |
1482 | for chunk in chunks: |
|
1481 | for chunk in chunks: | |
1483 | p.write(chunk) |
|
1482 | p.write(chunk) | |
1484 | p.close() |
|
1483 | p.close() | |
1485 | r = self.qrepo() |
|
1484 | r = self.qrepo() | |
1486 | if r: |
|
1485 | if r: | |
1487 | r[None].add([patchfn]) |
|
1486 | r[None].add([patchfn]) | |
1488 | except: # re-raises |
|
1487 | except: # re-raises | |
1489 | repo.rollback() |
|
1488 | repo.rollback() | |
1490 | raise |
|
1489 | raise | |
1491 | except Exception: |
|
1490 | except Exception: | |
1492 | patchpath = self.join(patchfn) |
|
1491 | patchpath = self.join(patchfn) | |
1493 | try: |
|
1492 | try: | |
1494 | os.unlink(patchpath) |
|
1493 | os.unlink(patchpath) | |
1495 | except OSError: |
|
1494 | except OSError: | |
1496 | self.ui.warn(_(b'error unlinking %s\n') % patchpath) |
|
1495 | self.ui.warn(_(b'error unlinking %s\n') % patchpath) | |
1497 | raise |
|
1496 | raise | |
1498 | self.removeundo(repo) |
|
1497 | self.removeundo(repo) | |
1499 |
|
1498 | |||
1500 | def isapplied(self, patch): |
|
1499 | def isapplied(self, patch): | |
1501 | """returns (index, rev, patch)""" |
|
1500 | """returns (index, rev, patch)""" | |
1502 | for i, a in enumerate(self.applied): |
|
1501 | for i, a in enumerate(self.applied): | |
1503 | if a.name == patch: |
|
1502 | if a.name == patch: | |
1504 | return (i, a.node, a.name) |
|
1503 | return (i, a.node, a.name) | |
1505 | return None |
|
1504 | return None | |
1506 |
|
1505 | |||
1507 | # if the exact patch name does not exist, we try a few |
|
1506 | # if the exact patch name does not exist, we try a few | |
1508 | # variations. If strict is passed, we try only #1 |
|
1507 | # variations. If strict is passed, we try only #1 | |
1509 | # |
|
1508 | # | |
1510 | # 1) a number (as string) to indicate an offset in the series file |
|
1509 | # 1) a number (as string) to indicate an offset in the series file | |
1511 | # 2) a unique substring of the patch name |
|
1510 | # 2) a unique substring of the patch name | |
1512 | # 3) patchname[-+]num to indicate an offset in the series file |
|
1511 | # 3) patchname[-+]num to indicate an offset in the series file | |
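Concretely, the three fallback forms listed above resolve against a series like so; this is a hypothetical miniature that ignores guards, strict mode, and the qtip/qbase aliases:

def mini_lookup(series, ref):
    if ref in series:                        # exact name
        return ref
    try:                                     # 1) numeric series offset
        return series[int(ref)]
    except (ValueError, IndexError):
        pass
    for sep in ('-', '+'):                   # 3) name-N / name+N
        base, _, off = ref.rpartition(sep)
        if base in series and off.isdigit():
            i = series.index(base)
            i = i - int(off) if sep == '-' else i + int(off)
            if 0 <= i < len(series):
                return series[i]
    matches = [s for s in series if ref in s]  # 2) unique substring
    return matches[0] if len(matches) == 1 else None

series = ['fix-io', 'add-tests', 'cleanup']
assert mini_lookup(series, '1') == 'add-tests'
assert mini_lookup(series, 'add-tests+1') == 'cleanup'
assert mini_lookup(series, 'io') == 'fix-io'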
1513 | def lookup(self, patch, strict=False): |
|
1512 | def lookup(self, patch, strict=False): | |
1514 | def partialname(s): |
|
1513 | def partialname(s): | |
1515 | if s in self.series: |
|
1514 | if s in self.series: | |
1516 | return s |
|
1515 | return s | |
1517 | matches = [x for x in self.series if s in x] |
|
1516 | matches = [x for x in self.series if s in x] | |
1518 | if len(matches) > 1: |
|
1517 | if len(matches) > 1: | |
1519 | self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s) |
|
1518 | self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s) | |
1520 | for m in matches: |
|
1519 | for m in matches: | |
1521 | self.ui.warn(b' %s\n' % m) |
|
1520 | self.ui.warn(b' %s\n' % m) | |
1522 | return None |
|
1521 | return None | |
1523 | if matches: |
|
1522 | if matches: | |
1524 | return matches[0] |
|
1523 | return matches[0] | |
1525 | if self.series and self.applied: |
|
1524 | if self.series and self.applied: | |
1526 | if s == b'qtip': |
|
1525 | if s == b'qtip': | |
1527 | return self.series[self.seriesend(True) - 1] |
|
1526 | return self.series[self.seriesend(True) - 1] | |
1528 | if s == b'qbase': |
|
1527 | if s == b'qbase': | |
1529 | return self.series[0] |
|
1528 | return self.series[0] | |
1530 | return None |
|
1529 | return None | |
1531 |
|
1530 | |||
1532 | if patch in self.series: |
|
1531 | if patch in self.series: | |
1533 | return patch |
|
1532 | return patch | |
1534 |
|
1533 | |||
1535 | if not os.path.isfile(self.join(patch)): |
|
1534 | if not os.path.isfile(self.join(patch)): | |
1536 | try: |
|
1535 | try: | |
1537 | sno = int(patch) |
|
1536 | sno = int(patch) | |
1538 | except (ValueError, OverflowError): |
|
1537 | except (ValueError, OverflowError): | |
1539 | pass |
|
1538 | pass | |
1540 | else: |
|
1539 | else: | |
1541 | if -len(self.series) <= sno < len(self.series): |
|
1540 | if -len(self.series) <= sno < len(self.series): | |
1542 | return self.series[sno] |
|
1541 | return self.series[sno] | |
1543 |
|
1542 | |||
1544 | if not strict: |
|
1543 | if not strict: | |
1545 | res = partialname(patch) |
|
1544 | res = partialname(patch) | |
1546 | if res: |
|
1545 | if res: | |
1547 | return res |
|
1546 | return res | |
1548 | minus = patch.rfind(b'-') |
|
1547 | minus = patch.rfind(b'-') | |
1549 | if minus >= 0: |
|
1548 | if minus >= 0: | |
1550 | res = partialname(patch[:minus]) |
|
1549 | res = partialname(patch[:minus]) | |
1551 | if res: |
|
1550 | if res: | |
1552 | i = self.series.index(res) |
|
1551 | i = self.series.index(res) | |
1553 | try: |
|
1552 | try: | |
1554 | off = int(patch[minus + 1 :] or 1) |
|
1553 | off = int(patch[minus + 1 :] or 1) | |
1555 | except (ValueError, OverflowError): |
|
1554 | except (ValueError, OverflowError): | |
1556 | pass |
|
1555 | pass | |
1557 | else: |
|
1556 | else: | |
1558 | if i - off >= 0: |
|
1557 | if i - off >= 0: | |
1559 | return self.series[i - off] |
|
1558 | return self.series[i - off] | |
1560 | plus = patch.rfind(b'+') |
|
1559 | plus = patch.rfind(b'+') | |
1561 | if plus >= 0: |
|
1560 | if plus >= 0: | |
1562 | res = partialname(patch[:plus]) |
|
1561 | res = partialname(patch[:plus]) | |
1563 | if res: |
|
1562 | if res: | |
1564 | i = self.series.index(res) |
|
1563 | i = self.series.index(res) | |
1565 | try: |
|
1564 | try: | |
1566 | off = int(patch[plus + 1 :] or 1) |
|
1565 | off = int(patch[plus + 1 :] or 1) | |
1567 | except (ValueError, OverflowError): |
|
1566 | except (ValueError, OverflowError): | |
1568 | pass |
|
1567 | pass | |
1569 | else: |
|
1568 | else: | |
1570 | if i + off < len(self.series): |
|
1569 | if i + off < len(self.series): | |
1571 | return self.series[i + off] |
|
1570 | return self.series[i + off] | |
1572 | raise error.Abort(_(b"patch %s not in series") % patch) |
|
1571 | raise error.Abort(_(b"patch %s not in series") % patch) | |
1573 |
|
1572 | |||
1574 | def push( |
|
1573 | def push( | |
1575 | self, |
|
1574 | self, | |
1576 | repo, |
|
1575 | repo, | |
1577 | patch=None, |
|
1576 | patch=None, | |
1578 | force=False, |
|
1577 | force=False, | |
1579 | list=False, |
|
1578 | list=False, | |
1580 | mergeq=None, |
|
1579 | mergeq=None, | |
1581 | all=False, |
|
1580 | all=False, | |
1582 | move=False, |
|
1581 | move=False, | |
1583 | exact=False, |
|
1582 | exact=False, | |
1584 | nobackup=False, |
|
1583 | nobackup=False, | |
1585 | keepchanges=False, |
|
1584 | keepchanges=False, | |
1586 | ): |
|
1585 | ): | |
1587 | self.checkkeepchanges(keepchanges, force) |
|
1586 | self.checkkeepchanges(keepchanges, force) | |
1588 | diffopts = self.diffopts() |
|
1587 | diffopts = self.diffopts() | |
1589 | with repo.wlock(): |
|
1588 | with repo.wlock(): | |
1590 | heads = [] |
|
1589 | heads = [] | |
1591 | for hs in repo.branchmap().iterheads(): |
|
1590 | for hs in repo.branchmap().iterheads(): | |
1592 | heads.extend(hs) |
|
1591 | heads.extend(hs) | |
1593 | if not heads: |
|
1592 | if not heads: | |
1594 | heads = [nullid] |
|
1593 | heads = [repo.nullid] | |
1595 | if repo.dirstate.p1() not in heads and not exact: |
|
1594 | if repo.dirstate.p1() not in heads and not exact: | |
1596 | self.ui.status(_(b"(working directory not at a head)\n")) |
|
1595 | self.ui.status(_(b"(working directory not at a head)\n")) | |
1597 |
|
1596 | |||
1598 | if not self.series: |
|
1597 | if not self.series: | |
1599 | self.ui.warn(_(b'no patches in series\n')) |
|
1598 | self.ui.warn(_(b'no patches in series\n')) | |
1600 | return 0 |
|
1599 | return 0 | |
1601 |
|
1600 | |||
1602 | # Suppose our series file is: A B C and the current 'top' |
|
1601 | # Suppose our series file is: A B C and the current 'top' | |
1603 | # patch is B. qpush C should be performed (moving forward) |
|
1602 | # patch is B. qpush C should be performed (moving forward) | |
1604 | # qpush B is a NOP (no change); qpush A is an error (can't |
|
1603 | # qpush B is a NOP (no change); qpush A is an error (can't | |
1605 | # go backwards with qpush) |
|
1604 | # go backwards with qpush) | |
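The A/B/C example in the comment above can be stated as a small predicate over the series and the applied prefix (a hypothetical sketch; the real code also consults guards):

def classify_push(series, applied, target):
    # applied is assumed to be a prefix of series; its last entry is 'top'.
    i = series.index(target)
    if i < len(applied) - 1:
        return 'error: cannot go backwards'
    if i == len(applied) - 1:
        return 'no-op: already at top'
    return 'push forward'

series, applied = ['A', 'B', 'C'], ['A', 'B']
assert classify_push(series, applied, 'C') == 'push forward'
assert classify_push(series, applied, 'B') == 'no-op: already at top'
assert classify_push(series, applied, 'A') == 'error: cannot go backwards'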
1606 | if patch: |
|
1605 | if patch: | |
1607 | patch = self.lookup(patch) |
|
1606 | patch = self.lookup(patch) | |
1608 | info = self.isapplied(patch) |
|
1607 | info = self.isapplied(patch) | |
1609 | if info and info[0] >= len(self.applied) - 1: |
|
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _(b'qpush: %s is already at the top\n') % patch
                    )
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise error.Abort(
                            _(b"cannot push to a previous patch: %s") % patch
                        )
                else:
                    if reason:
                        reason = _(b'guarded by %s') % reason
                    else:
                        reason = _(b'no matching guards')
                    self.ui.warn(
                        _(b"cannot push '%s' - %s\n") % (patch, reason)
                    )
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_(b'all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.seriesend()
            if start == len(self.series):
                self.ui.warn(_(b'patch series already fully applied\n'))
                return 1
            if not force and not keepchanges:
                self.checklocalchanges(repo, refresh=self.applied)

            if exact:
                if keepchanges:
                    raise error.Abort(
                        _(b"cannot use --exact and --keep-changes together")
                    )
                if move:
                    raise error.Abort(
                        _(b'cannot use --exact and --move together')
                    )
                if self.applied:
                    raise error.Abort(
                        _(b'cannot push --exact with applied patches')
                    )
                root = self.series[start]
                target = patchheader(self.join(root), self.plainmode).parent
                if not target:
                    raise error.Abort(
                        _(b"%s does not have a parent recorded") % root
                    )
                if not repo[target] == repo[b'.']:
                    hg.update(repo, target)

            if move:
                if not patch:
                    raise error.Abort(_(b"please specify the patch to move"))
                for fullstart, rpn in enumerate(self.fullseries):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                        break
                for i, rpn in enumerate(self.fullseries[fullstart:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = fullstart + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(fullstart, fullpatch)
                self.parseseries()
                self.seriesdirty = True

            self.applieddirty = True
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            tobackup = set()
            if (not nobackup and force) or keepchanges:
                status = self.checklocalchanges(repo, force=True)
                if keepchanges:
                    tobackup.update(
                        status.modified
                        + status.added
                        + status.removed
                        + status.deleted
                    )
                else:
                    tobackup.update(status.modified + status.added)

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(
                        repo,
                        s,
                        list,
                        all_files=all_files,
                        tobackup=tobackup,
                        keepchanges=keepchanges,
                    )
            except AbortNoCleanup:
                raise
            except:  # re-raises
                self.ui.warn(_(b'cleaning up working directory...\n'))
                cmdutil.revert(
                    self.ui,
                    repo,
                    repo[b'.'],
                    no_backup=True,
                )
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                self.ui.warn(_(b'done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _(b"errors during apply, please fix and qrefresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_(b"now at: %s\n") % top)
            return ret[0]
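Reviewer note (illustration, not part of the diff): the 0/1 return codes in push() are load-bearing; they are what lets the "...while hg qpush..." shell loop mentioned in the comment above terminate cleanly. A minimal standalone sketch of that contract, using toy names rather than the real mq API:

# Toy model of qpush's return-code contract (hypothetical names, not mq API):
# 0 = a patch was pushed (or the request was a harmless no-op),
# 1 = nothing left to apply, which breaks a shell `while hg qpush` loop.

def toy_qpush(series, applied):
    """Push the next patch from series onto applied; mimic mq's codes."""
    start = len(applied)          # next unapplied index, like seriesend()
    if start == len(series):
        print('patch series already fully applied')
        return 1
    applied.append(series[start])
    print('now at: %s' % applied[-1])
    return 0

series = ['A', 'B', 'C']
applied = ['A']
while toy_qpush(series, applied) == 0:   # mirrors "...while hg qpush..."
    pass
assert applied == series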
    def pop(
        self,
        repo,
        patch=None,
        force=False,
        update=True,
        all=False,
        nobackup=False,
        keepchanges=False,
    ):
        self.checkkeepchanges(keepchanges, force)
        with repo.wlock():
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise error.Abort(_(b"patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_(b"no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
                return

            if not update:
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_(b"qpop: forcing dirstate update\n"))
                        update = True
            else:
                parents = [p.node() for p in repo[None].parents()]
                update = any(
                    entry.node in parents for entry in self.applied[start:]
                )

            tobackup = set()
            if update:
                s = self.checklocalchanges(repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(s.modified + s.added)
                elif keepchanges:
                    tobackup.update(
                        s.modified + s.added + s.removed + s.deleted
                    )

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise error.Abort(_(b'trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise error.Abort(
                    _(
                        b"popping would remove a revision not "
                        b"managed by this patch queue"
                    )
                )
            if not repo[self.applied[-1].node].mutable():
                raise error.Abort(
                    _(b"popping would remove a public revision"),
                    hint=_(b"see 'hg help phases' for details"),
                )

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                st = repo.status(qp, b'.')
                m, a, r, d = st.modified, st.added, st.removed, st.deleted
                if d:
                    raise error.Abort(_(b"deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    raise error.Abort(_(b"local changes found, qrefresh first"))
                self.backup(repo, tobackup)
                with repo.dirstate.parentchange():
                    for f in a:
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                        repo.dirstate.drop(f)
                    for f in m + r:
                        fctx = ctx[f]
                        repo.wwrite(f, fctx.data(), fctx.flags())
                        repo.dirstate.normal(f)
                    repo.setparents(qp, repo.nullid)  # changed: was `repo.setparents(qp, nullid)`
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_(b"popping %s\n") % patch.name)
            del self.applied[start:end]
            strip(self.ui, repo, [rev], update=False, backup=False)
            for s, state in repo[b'.'].substate.items():
                repo[b'.'].sub(s).get(state)
            if self.applied:
                self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_(b"patch queue now empty\n"))
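Reviewer note (illustration, not part of the diff): the update path above deliberately skips hg.update(), because pop() has already verified there are no local changes; it can rewrite the working copy directly from the target context. A rough standalone model of that shortcut, with plain dicts standing in for contexts:

# Sketch of pop's simplified update; dicts stand in for manifests/contexts.
# Files added since the target parent are unlinked; files modified or
# removed are rewritten from the parent's contents.

def toy_pop_update(workdir, parent, added, modified_or_removed):
    for f in added:                     # like the `for f in a:` loop
        workdir.pop(f, None)            # wvfs.unlinkpath + dirstate.drop
    for f in modified_or_removed:       # like the `for f in m + r:` loop
        workdir[f] = parent[f]          # wwrite from fctx.data()

parent = {'a.txt': b'v1', 'b.txt': b'v1'}
workdir = {'a.txt': b'v2', 'b.txt': b'v1', 'new.txt': b'x'}
toy_pop_update(workdir, parent, added=['new.txt'], modified_or_removed=['a.txt'])
assert workdir == parent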
    def diff(self, repo, pats, opts):
        top, patch = self.checktoppatch(repo)
        if not top:
            self.ui.write(_(b"no patches applied\n"))
            return
        qp = self.qparents(repo, top)
        if opts.get(b'reverse'):
            node1, node2 = None, qp
        else:
            node1, node2 = qp, None
        diffopts = self.diffopts(opts, patch)
        self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
    def refresh(self, repo, pats=None, **opts):
        opts = pycompat.byteskwargs(opts)
        if not self.applied:
            self.ui.write(_(b"no patches applied\n"))
            return 1
        msg = opts.get(b'msg', b'').rstrip()
        edit = opts.get(b'edit')
        editform = opts.get(b'editform', b'mq.qrefresh')
        newuser = opts.get(b'user')
        newdate = opts.get(b'date')
        if newdate:
            newdate = b'%d %d' % dateutil.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise error.Abort(
                    _(b"cannot qrefresh a revision with children")
                )
            if not repo[top].mutable():
                raise error.Abort(
                    _(b"cannot qrefresh public revision"),
                    hint=_(b"see 'hg help phases' for details"),
                )

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)

            inclsubs = checksubstate(repo, patchparent)
            if inclsubs:
                substatestate = repo.dirstate[b'.hgsubstate']

            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts(
                {b'git': opts.get(b'git')}, patchfn, plain=True
            )
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, b'w', atomictemp=True)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            # st = repo.status(top, patchparent)
            # but we do it backwards to take advantage of manifest/changelog
            # caching against the next repo.status call
            st = repo.status(patchparent, top)
            mm, aa, dd = st.modified, st.added, st.removed
            ctx = repo[top]
            aaa = aa[:]
            match1 = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get(b'short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
                # filter with include/exclude options
                match1 = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            stb = repo.status(match=match)
            m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)

            # create 'match' that includes the files to be recommitted.
            # apply match1 via repo.status to ensure correct case handling.
            st = repo.status(patchparent, match=match1)
            cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted
            allmatches = set(cm + ca + cr + cd)
            refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]

            files = set(inclsubs)
            for x in refreshchanges:
                files.update(x)
            match = scmutil.matchfiles(repo, files)

            bmlist = repo[top].bookmarks()

            dsguard = None
            try:
                dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        src = ctx[dst].copysource()
                        if src:
                            copies.setdefault(src, []).extend(
                                copies.get(dst, [])
                            )
                            if dst in a:
                                copies[src].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in pycompat.iteritems(copies):
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in pycompat.xrange(len(m) - 1, -1, -1):
                    if not match1(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.drop(f)

                user = ph.user or ctx.user()

                oldphase = repo[top].phase()

                # assumes strip can roll itself back if interrupted
                repo.setparents(*cparents)
                self.applied.pop()
                self.applieddirty = True
                strip(self.ui, repo, [top], update=False, backup=False)
                dsguard.close()
            finally:
                release(dsguard)

            try:
                # might be nice to attempt to roll back strip after this

                defaultmsg = b"[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:

                    def finishdesc(desc):
                        if desc.rstrip():
                            ph.setmessage(desc)
                            return desc
                        return defaultmsg

                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _(b'Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(
                        finishdesc=finishdesc,
                        extramsg=extramsg,
                        editform=editform,
                    )
                    message = msg or b"\n".join(ph.message)
                elif not msg:
                    if not ph.message:
                        message = defaultmsg
                    else:
                        message = b"\n".join(ph.message)
                else:
                    message = msg
                    ph.setmessage(msg)

                # Ensure we create a new changeset in the same phase than
                # the old one.
                lock = tr = None
                try:
                    lock = repo.lock()
                    tr = repo.transaction(b'mq')
                    n = newcommit(
                        repo,
                        oldphase,
                        message,
                        user,
                        ph.date,
                        match=match,
                        force=True,
                        editor=editor,
                    )
                    # only write patch after a successful commit
                    c = [list(x) for x in refreshchanges]
                    if inclsubs:
                        self.putsubstate2changes(substatestate, c)
                    chunks = patchmod.diff(
                        repo, patchparent, changes=c, opts=diffopts
                    )
                    comments = bytes(ph)
                    if comments:
                        patchf.write(comments)
                    for chunk in chunks:
                        patchf.write(chunk)
                    patchf.close()

                    marks = repo._bookmarks
                    marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
                    tr.close()

                    self.applied.append(statusentry(n, patchfn))
                finally:
                    lockmod.release(tr, lock)
            except:  # re-raises
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(
                    _(
                        b'qrefresh interrupted while patch was popped! '
                        b'(revert --all, qpush to recover)\n'
                    )
                )
                raise
        finally:
            wlock.release()
            self.removeundo(repo)
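Reviewer note (illustration, not part of the diff): the subtlest part of refresh() is folding the working-directory status into the patch's own modified/added/removed sets. The same set logic, pulled out as a pure function with a toy signature (not the mq one), for readers who want to check it in isolation:

# Pure-function sketch of refresh's set reclassification. Inputs are the
# patch's modified/added/removed files (mm/aa/dd) and the working dir's
# modified, added, and deleted+removed files (m/a/dr).

def toy_reclassify(mm, aa, dd, m, a, dr):
    mm, aa, dd = set(mm), set(aa), set(dd)
    for x in m:            # modified in wdir: stays "added" if patch added it
        if x not in aa:
            mm.add(x)
    for x in a:            # added in wdir over a patch deletion -> modified
        if x in dd:
            dd.remove(x)
            mm.add(x)
        else:
            aa.add(x)
    forget = []
    for x in dr:           # deleted in wdir: drop from add, else mark removed
        if x in aa:
            aa.remove(x)
            forget.append(x)
            continue
        else:
            mm.discard(x)
        dd.add(x)
    return mm, aa, dd, forget

# A file the patch added but the wdir then deleted is simply forgotten:
mm, aa, dd, forget = toy_reclassify([], ['new.txt'], [], [], [], ['new.txt'])
assert forget == ['new.txt'] and not (mm | aa | dd)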
    def init(self, repo, create=False):
        if not create and os.path.isdir(self.path):
            raise error.Abort(_(b"patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except OSError as inst:
            if inst.errno != errno.EEXIST or not create:
                raise
        if create:
            return self.qrepo(create=True)
    def unapplied(self, repo, patch=None):
        if patch and patch not in self.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        if not patch:
            start = self.seriesend()
        else:
            start = self.series.index(patch) + 1
        unapplied = []
        for i in pycompat.xrange(start, len(self.series)):
            pushable, reason = self.pushable(i)
            if pushable:
                unapplied.append((i, self.series[i]))
            self.explainpushable(i)
        return unapplied
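Reviewer note (illustration, not part of the diff): the pushable() helper used here and in push() above is what filters guarded patches out. For readers unfamiliar with mq guards, a deliberately simplified toy model of guard matching (the real semantics live in pushable(), defined earlier in this file; treat every name below as hypothetical):

# Toy model of mq guard selection, simplified: a patch guarded with +g is
# only pushable when guard g is active, and one guarded with -g is only
# pushable when g is NOT active.

def toy_pushable(patchguards, active):
    pos = {g[1:] for g in patchguards if g.startswith('+')}
    neg = {g[1:] for g in patchguards if g.startswith('-')}
    if pos and not (pos & active):
        return False, 'no matching guards'
    bad = neg & active
    if bad:
        return False, 'guarded by -%s' % sorted(bad)[0]
    return True, ''

assert toy_pushable(['+stable'], {'stable'})[0]
assert not toy_pushable(['+stable'], {'dev'})[0]
assert not toy_pushable(['-dev'], {'dev'})[0]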
    def qseries(
        self,
        repo,
        missing=None,
        start=0,
        length=None,
        status=None,
        summary=False,
    ):
        def displayname(pfx, patchname, state):
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                if ph.message:
                    msg = ph.message[0]
                else:
                    msg = b''

                if self.ui.formatted():
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = stringutil.ellipsis(msg, width)
                    else:
                        msg = b''
                self.ui.write(patchname, label=b'qseries.' + state)
                self.ui.write(b': ')
                self.ui.write(msg, label=b'qseries.message.' + state)
            else:
                self.ui.write(patchname, label=b'qseries.' + state)
            self.ui.write(b'\n')

        applied = {p.name for p in self.applied}
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(b"%d" % (start + length - 1))
            for i in pycompat.xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = b'A', b'applied'
                elif self.pushable(i)[0]:
                    char, state = b'U', b'unapplied'
                else:
                    char, state = b'G', b'guarded'
                pfx = b''
                if self.ui.verbose:
                    pfx = b'%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    continue
                displayname(pfx, patch, state)
        else:
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1 :]
                for f in files:
                    fl = os.path.join(d, f)
                    if (
                        fl not in self.series
                        and fl
                        not in (
                            self.statuspath,
                            self.seriespath,
                            self.guardspath,
                        )
                        and not fl.startswith(b'.')
                    ):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and b'D ' or b''
                displayname(pfx, x, b'missing')
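Reviewer note (illustration, not part of the diff): qseries collapses each patch's state to one of three flag characters (A = applied, U = unapplied but pushable, G = guarded). A compact sketch of just that classification, with a hypothetical helper rather than the real API:

# Sketch of the A/U/G classification used by `hg qseries -v`.

def toy_flag(patch, applied, pushable):
    if patch in applied:
        return 'A', 'applied'
    elif pushable(patch):
        return 'U', 'unapplied'
    else:
        return 'G', 'guarded'

series = ['A.diff', 'B.diff', 'C.diff']
applied = {'A.diff'}
guarded = {'C.diff'}
idxwidth = len(str(len(series) - 1))      # same width logic as qseries
for i, p in enumerate(series):
    char, state = toy_flag(p, applied, lambda q: q not in guarded)
    print('%*d %s %s' % (idxwidth, i, char, p))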
    def issaveline(self, l):
        if l.name == b'.hg.patches.save.line':
            return True
    def qrepo(self, create=False):
        ui = self.baseui.copy()
        # copy back attributes set by ui.pager()
        if self.ui.pageractive and not ui.pageractive:
            ui.pageractive = self.ui.pageractive
            # internal config: ui.formatted
            ui.setconfig(
                b'ui',
                b'formatted',
                self.ui.config(b'ui', b'formatted'),
                b'mqpager',
            )
            ui.setconfig(
                b'ui',
                b'interactive',
                self.ui.config(b'ui', b'interactive'),
                b'mqpager',
            )
        if create or os.path.isdir(self.join(b".hg")):
            return hg.repository(ui, path=self.path, create=create)
    def restore(self, repo, rev, delete=None, qupdate=None):
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == b'Patch Data:':
                datastart = i + 1
            elif line.startswith(b'Dirstate:'):
                l = line.rstrip()
                l = l[10:].split(b' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                l = line.rstrip()
                n, name = l.split(b':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_(b"no saved patch data found\n"))
            return 1
        self.ui.warn(_(b"restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn(_(b"save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_(b"removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                strip(self.ui, repo, [rev], update=update, backup=False)
        if qpp:
            self.ui.warn(
                _(b"saved queue repository parents: %s %s\n")
                % (short(qpp[0]), short(qpp[1]))
            )
            if qupdate:
                self.ui.status(_(b"updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_(b"unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])
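Reviewer note (illustration, not part of the diff): restore() round-trips the ad-hoc text format that save() (just below) embeds in the save commit's description. A minimal standalone parser for that format, using str instead of bytes and toy names; the real layout is whatever save() emits:

# Minimal parser for the save-line description format:
#   <status line>
#   Dirstate: <hex-p1> <hex-p2>
#   Patch Data:
#   <node>:<name>    (applied entries)
#   :<name>          (remaining series entries)

def toy_parse_save_desc(desc):
    datastart, qpp, applied, series = None, None, [], []
    for i, line in enumerate(desc.strip().splitlines()):
        if line == 'Patch Data:':
            datastart = i + 1                 # data follows this marker
        elif line.startswith('Dirstate:'):
            qpp = line[10:].split(' ')        # the two queue-repo parents
        elif datastart is not None:
            n, name = line.rstrip().split(':', 1)
            (applied if n else series).append((n, name) if n else name)
    return qpp, applied, series

desc = ("hg patches saved state\nDirstate: aa11 bb22\n\n"
        "Patch Data:\nff00:one.diff\n:two.diff")
qpp, applied, series = toy_parse_save_desc(desc)
assert qpp == ['aa11', 'bb22']
assert applied == [('ff00', 'one.diff')] and series == ['two.diff']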
    def save(self, repo, msg=None):
        if not self.applied:
            self.ui.warn(_(b"save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_(b"status is already saved\n"))
            return 1

        if not msg:
            msg = _(b"hg patches saved state")
        else:
            msg = b"hg patches: " + msg.rstrip(b'\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += b"\n\nPatch Data:\n"
        msg += b''.join(b'%s\n' % x for x in self.applied)
        msg += b''.join(b':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_(b"repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, b'.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)
    def fullseriesend(self):
        if self.applied:
            p = self.applied[-1].name
            end = self.findseries(p)
            if end is None:
                return len(self.fullseries)
            return end + 1
        return 0
    def seriesend(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
        end = 0

        def nextpatch(start):
            if all_patches or start >= len(self.series):
                return start
            for i in pycompat.xrange(start, len(self.series)):
                p, reason = self.pushable(i)
                if p:
                    return i
                self.explainpushable(i)
            return len(self.series)

        if self.applied:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                return 0
            return nextpatch(end + 1)
        return nextpatch(end)
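Reviewer note (illustration, not part of the diff): the two modes described in seriesend()'s docstring are easy to confuse. A toy model of both behaviours, with guards reduced to a set of pushable patch names:

# Toy model of seriesend(): with all_patches=True it returns the index just
# past the last applied patch; otherwise it skips unpushable (guarded)
# patches and returns the index of the next one that could be pushed.

def toy_seriesend(series, applied, pushable, all_patches=False):
    start = series.index(applied[-1]) + 1 if applied else 0
    if all_patches or start >= len(series):
        return start
    for i in range(start, len(series)):
        if series[i] in pushable:
            return i
    return len(series)

series = ['A', 'B', 'C']
assert toy_seriesend(series, ['A'], {'C'}) == 2          # B is guarded
assert toy_seriesend(series, ['A'], {'C'}, all_patches=True) == 1
assert toy_seriesend(series, ['A', 'B', 'C'], set()) == 3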
    def appliedname(self, index):
        pname = self.applied[index].name
        if not self.ui.verbose:
            p = pname
        else:
            p = (b"%d" % self.series.index(pname)) + b" " + pname
        return p
    def qimport(
        self,
        repo,
        files,
        patchname=None,
        rev=None,
        existing=None,
        force=None,
        git=False,
    ):
        def checkseries(patchname):
            if patchname in self.series:
                raise error.Abort(
                    _(b'patch %s is already in the series file') % patchname
                )

        if rev:
            if files:
                raise error.Abort(
                    _(b'option "-r" not valid when importing files')
                )
            rev = scmutil.revrange(repo, rev)
            rev.sort(reverse=True)
        elif not files:
            raise error.Abort(_(b'no files or revisions specified'))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise error.Abort(
                _(b'option "-n" not valid when importing multiple patches')
            )
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev.first()))
            if len(heads) > 1:
                raise error.Abort(
                    _(b'revision %d is the root of more than one branch')
                    % rev.last()
                )
            if self.applied:
                base = repo.changelog.node(rev.first())
                if base in [n.node for n in self.applied]:
                    raise error.Abort(
                        _(b'revision %d is already managed') % rev.first()
                    )
                if heads != [self.applied[-1].node]:
                    raise error.Abort(
                        _(b'revision %d is not the parent of the queue')
                        % rev.first()
                    )
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev.first())]:
                    raise error.Abort(
                        _(b'revision %d has unmanaged children') % rev.first()
                    )
                lastparent = None

            diffopts = self.diffopts({b'git': git})
            with repo.transaction(b'qimport') as tr:
                for r in rev:
                    if not repo[r].mutable():
                        raise error.Abort(
                            _(b'revision %d is not mutable') % r,
                            hint=_(b"see 'hg help phases' " b'for details'),
                        )
                    p1, p2 = repo.changelog.parentrevs(r)
                    n = repo.changelog.node(r)
                    if p2 != nullrev:
                        raise error.Abort(
                            _(b'cannot import merge revision %d') % r
                        )
                    if lastparent and lastparent != r:
                        raise error.Abort(
                            _(b'revision %d is not the parent of %d')
                            % (r, lastparent)
                        )
                    lastparent = p1

                    if not patchname:
                        patchname = self.makepatchname(
                            repo[r].description().split(b'\n', 1)[0],
                            b'%d.diff' % r,
                        )
                    checkseries(patchname)
                    self.checkpatchname(patchname, force)
                    self.fullseries.insert(0, patchname)

                    with self.opener(patchname, b"w") as fp:
                        cmdutil.exportfile(repo, [n], fp, opts=diffopts)

                    se = statusentry(n, patchname)
                    self.applied.insert(0, se)

                    self.added.append(patchname)
                    imported.append(patchname)
                    patchname = None
                if rev and repo.ui.configbool(b'mq', b'secret'):
                    # if we added anything with --rev, move the secret root
                    phases.retractboundary(repo, tr, phases.secret, [n])
                self.parseseries()
                self.applieddirty = True
                self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                if filename == b'-':
                    raise error.Abort(
                        _(b'-e is incompatible with import from -')
                    )
                filename = normname(filename)
                self.checkreservedname(filename)
                if urlutil.url(filename).islocal():
                    originpath = self.join(filename)
                    if not os.path.isfile(originpath):
                        raise error.Abort(
                            _(b"patch %s does not exist") % filename
                        )

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(
                        _(b'renaming %s to %s\n') % (filename, patchname)
                    )
                    util.rename(originpath, self.join(patchname))
|
2525 | util.rename(originpath, self.join(patchname)) | |
2527 | else: |
|
2526 | else: | |
2528 | patchname = filename |
|
2527 | patchname = filename | |
2529 |
|
2528 | |||
2530 | else: |
|
2529 | else: | |
2531 | if filename == b'-' and not patchname: |
|
2530 | if filename == b'-' and not patchname: | |
2532 | raise error.Abort( |
|
2531 | raise error.Abort( | |
2533 | _(b'need --name to import a patch from -') |
|
2532 | _(b'need --name to import a patch from -') | |
2534 | ) |
|
2533 | ) | |
2535 | elif not patchname: |
|
2534 | elif not patchname: | |
2536 | patchname = normname( |
|
2535 | patchname = normname( | |
2537 | os.path.basename(filename.rstrip(b'/')) |
|
2536 | os.path.basename(filename.rstrip(b'/')) | |
2538 | ) |
|
2537 | ) | |
2539 | self.checkpatchname(patchname, force) |
|
2538 | self.checkpatchname(patchname, force) | |
2540 | try: |
|
2539 | try: | |
2541 | if filename == b'-': |
|
2540 | if filename == b'-': | |
2542 | text = self.ui.fin.read() |
|
2541 | text = self.ui.fin.read() | |
2543 | else: |
|
2542 | else: | |
2544 | fp = hg.openpath(self.ui, filename) |
|
2543 | fp = hg.openpath(self.ui, filename) | |
2545 | text = fp.read() |
|
2544 | text = fp.read() | |
2546 | fp.close() |
|
2545 | fp.close() | |
2547 | except (OSError, IOError): |
|
2546 | except (OSError, IOError): | |
2548 | raise error.Abort(_(b"unable to read file %s") % filename) |
|
2547 | raise error.Abort(_(b"unable to read file %s") % filename) | |
2549 | patchf = self.opener(patchname, b"w") |
|
2548 | patchf = self.opener(patchname, b"w") | |
2550 | patchf.write(text) |
|
2549 | patchf.write(text) | |
2551 | patchf.close() |
|
2550 | patchf.close() | |
2552 | if not force: |
|
2551 | if not force: | |
2553 | checkseries(patchname) |
|
2552 | checkseries(patchname) | |
2554 | if patchname not in self.series: |
|
2553 | if patchname not in self.series: | |
2555 | index = self.fullseriesend() + i |
|
2554 | index = self.fullseriesend() + i | |
2556 | self.fullseries[index:index] = [patchname] |
|
2555 | self.fullseries[index:index] = [patchname] | |
2557 | self.parseseries() |
|
2556 | self.parseseries() | |
2558 | self.seriesdirty = True |
|
2557 | self.seriesdirty = True | |
2559 | self.ui.warn(_(b"adding %s to series file\n") % patchname) |
|
2558 | self.ui.warn(_(b"adding %s to series file\n") % patchname) | |
2560 | self.added.append(patchname) |
|
2559 | self.added.append(patchname) | |
2561 | imported.append(patchname) |
|
2560 | imported.append(patchname) | |
2562 | patchname = None |
|
2561 | patchname = None | |
2563 |
|
2562 | |||
2564 | self.removeundo(repo) |
|
2563 | self.removeundo(repo) | |
2565 | return imported |
|
2564 | return imported | |
2566 |
|
2565 | |||
2567 |
|
2566 | |||
2568 | def fixkeepchangesopts(ui, opts): |
|
2567 | def fixkeepchangesopts(ui, opts): | |
2569 | if ( |
|
2568 | if ( | |
2570 | not ui.configbool(b'mq', b'keepchanges') |
|
2569 | not ui.configbool(b'mq', b'keepchanges') | |
2571 | or opts.get(b'force') |
|
2570 | or opts.get(b'force') | |
2572 | or opts.get(b'exact') |
|
2571 | or opts.get(b'exact') | |
2573 | ): |
|
2572 | ): | |
2574 | return opts |
|
2573 | return opts | |
2575 | opts = dict(opts) |
|
2574 | opts = dict(opts) | |
2576 | opts[b'keep_changes'] = True |
|
2575 | opts[b'keep_changes'] = True | |
2577 | return opts |
|
2576 | return opts | |
2578 |
|
2577 | |||
2579 |
|
2578 | |||
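
fixkeepchangesopts() above rewrites the options only when the
mq.keepchanges config is set and neither --force nor --exact was
given. A dependency-free sketch of the same decision, with a plain
dict in place of Mercurial's opts (illustrative only):

def fixkeepchanges_sketch(keepchanges_config, opts):
    # Mirror of the guard above: config off, --force, or --exact
    # all leave the options untouched.
    if not keepchanges_config or opts.get('force') or opts.get('exact'):
        return opts
    opts = dict(opts)
    opts['keep_changes'] = True
    return opts

print(fixkeepchanges_sketch(True, {}))            # {'keep_changes': True}
print(fixkeepchanges_sketch(True, {'force': 1}))  # {'force': 1}
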
2580 | @command( |
|
2579 | @command( | |
2581 | b"qdelete|qremove|qrm", |
|
2580 | b"qdelete|qremove|qrm", | |
2582 | [ |
|
2581 | [ | |
2583 | (b'k', b'keep', None, _(b'keep patch file')), |
|
2582 | (b'k', b'keep', None, _(b'keep patch file')), | |
2584 | ( |
|
2583 | ( | |
2585 | b'r', |
|
2584 | b'r', | |
2586 | b'rev', |
|
2585 | b'rev', | |
2587 | [], |
|
2586 | [], | |
2588 | _(b'stop managing a revision (DEPRECATED)'), |
|
2587 | _(b'stop managing a revision (DEPRECATED)'), | |
2589 | _(b'REV'), |
|
2588 | _(b'REV'), | |
2590 | ), |
|
2589 | ), | |
2591 | ], |
|
2590 | ], | |
2592 | _(b'hg qdelete [-k] [PATCH]...'), |
|
2591 | _(b'hg qdelete [-k] [PATCH]...'), | |
2593 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2592 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | |
2594 | ) |
|
2593 | ) | |
2595 | def delete(ui, repo, *patches, **opts): |
|
2594 | def delete(ui, repo, *patches, **opts): | |
2596 | """remove patches from queue |
|
2595 | """remove patches from queue | |
2597 |
|
2596 | |||
2598 | The patches must not be applied, and at least one patch is required. Exact |
|
2597 | The patches must not be applied, and at least one patch is required. Exact | |
2599 | patch identifiers must be given. With -k/--keep, the patch files are |
|
2598 | patch identifiers must be given. With -k/--keep, the patch files are | |
2600 | preserved in the patch directory. |
|
2599 | preserved in the patch directory. | |
2601 |
|
2600 | |||
2602 | To stop managing a patch and move it into permanent history, |
|
2601 | To stop managing a patch and move it into permanent history, | |
2603 | use the :hg:`qfinish` command.""" |
|
2602 | use the :hg:`qfinish` command.""" | |
2604 | q = repo.mq |
|
2603 | q = repo.mq | |
2605 | q.delete(repo, patches, pycompat.byteskwargs(opts)) |
|
2604 | q.delete(repo, patches, pycompat.byteskwargs(opts)) | |
2606 | q.savedirty() |
|
2605 | q.savedirty() | |
2607 | return 0 |
|
2606 | return 0 | |
2608 |
|
2607 | |||
2609 |
|
2608 | |||
2610 | @command( |
|
2609 | @command( | |
2611 | b"qapplied", |
|
2610 | b"qapplied", | |
2612 | [(b'1', b'last', None, _(b'show only the preceding applied patch'))] |
|
2611 | [(b'1', b'last', None, _(b'show only the preceding applied patch'))] | |
2613 | + seriesopts, |
|
2612 | + seriesopts, | |
2614 | _(b'hg qapplied [-1] [-s] [PATCH]'), |
|
2613 | _(b'hg qapplied [-1] [-s] [PATCH]'), | |
2615 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2614 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | |
2616 | ) |
|
2615 | ) | |
2617 | def applied(ui, repo, patch=None, **opts): |
|
2616 | def applied(ui, repo, patch=None, **opts): | |
2618 | """print the patches already applied |
|
2617 | """print the patches already applied | |
2619 |
|
2618 | |||
2620 | Returns 0 on success.""" |
|
2619 | Returns 0 on success.""" | |
2621 |
|
2620 | |||
2622 | q = repo.mq |
|
2621 | q = repo.mq | |
2623 | opts = pycompat.byteskwargs(opts) |
|
2622 | opts = pycompat.byteskwargs(opts) | |
2624 |
|
2623 | |||
2625 | if patch: |
|
2624 | if patch: | |
2626 | if patch not in q.series: |
|
2625 | if patch not in q.series: | |
2627 | raise error.Abort(_(b"patch %s is not in series file") % patch) |
|
2626 | raise error.Abort(_(b"patch %s is not in series file") % patch) | |
2628 | end = q.series.index(patch) + 1 |
|
2627 | end = q.series.index(patch) + 1 | |
2629 | else: |
|
2628 | else: | |
2630 | end = q.seriesend(True) |
|
2629 | end = q.seriesend(True) | |
2631 |
|
2630 | |||
2632 | if opts.get(b'last') and not end: |
|
2631 | if opts.get(b'last') and not end: | |
2633 | ui.write(_(b"no patches applied\n")) |
|
2632 | ui.write(_(b"no patches applied\n")) | |
2634 | return 1 |
|
2633 | return 1 | |
2635 | elif opts.get(b'last') and end == 1: |
|
2634 | elif opts.get(b'last') and end == 1: | |
2636 | ui.write(_(b"only one patch applied\n")) |
|
2635 | ui.write(_(b"only one patch applied\n")) | |
2637 | return 1 |
|
2636 | return 1 | |
2638 | elif opts.get(b'last'): |
|
2637 | elif opts.get(b'last'): | |
2639 | start = end - 2 |
|
2638 | start = end - 2 | |
2640 | end = 1 |
|
2639 | end = 1 | |
2641 | else: |
|
2640 | else: | |
2642 | start = 0 |
|
2641 | start = 0 | |
2643 |
|
2642 | |||
2644 | q.qseries( |
|
2643 | q.qseries( | |
2645 | repo, length=end, start=start, status=b'A', summary=opts.get(b'summary') |
|
2644 | repo, length=end, start=start, status=b'A', summary=opts.get(b'summary') | |
2646 | ) |
|
2645 | ) | |
2647 |
|
2646 | |||
2648 |
|
2647 | |||
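
The --last branch above selects the patch before the current one by
sliding a one-entry window backwards: start = end - 2 with length 1.
The same arithmetic on a plain list (a sketch, not q.qseries()):

applied = ['p1', 'p2', 'p3']     # stand-in for the applied stack
end = len(applied)               # q.seriesend(True) with all applied
start = end - 2                  # --last: the patch below the top
print(applied[start:start + 1])  # ['p2']
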
2649 | @command( |
|
2648 | @command( | |
2650 | b"qunapplied", |
|
2649 | b"qunapplied", | |
2651 | [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts, |
|
2650 | [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts, | |
2652 | _(b'hg qunapplied [-1] [-s] [PATCH]'), |
|
2651 | _(b'hg qunapplied [-1] [-s] [PATCH]'), | |
2653 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2652 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | |
2654 | ) |
|
2653 | ) | |
2655 | def unapplied(ui, repo, patch=None, **opts): |
|
2654 | def unapplied(ui, repo, patch=None, **opts): | |
2656 | """print the patches not yet applied |
|
2655 | """print the patches not yet applied | |
2657 |
|
2656 | |||
2658 | Returns 0 on success.""" |
|
2657 | Returns 0 on success.""" | |
2659 |
|
2658 | |||
2660 | q = repo.mq |
|
2659 | q = repo.mq | |
2661 | opts = pycompat.byteskwargs(opts) |
|
2660 | opts = pycompat.byteskwargs(opts) | |
2662 | if patch: |
|
2661 | if patch: | |
2663 | if patch not in q.series: |
|
2662 | if patch not in q.series: | |
2664 | raise error.Abort(_(b"patch %s is not in series file") % patch) |
|
2663 | raise error.Abort(_(b"patch %s is not in series file") % patch) | |
2665 | start = q.series.index(patch) + 1 |
|
2664 | start = q.series.index(patch) + 1 | |
2666 | else: |
|
2665 | else: | |
2667 | start = q.seriesend(True) |
|
2666 | start = q.seriesend(True) | |
2668 |
|
2667 | |||
2669 | if start == len(q.series) and opts.get(b'first'): |
|
2668 | if start == len(q.series) and opts.get(b'first'): | |
2670 | ui.write(_(b"all patches applied\n")) |
|
2669 | ui.write(_(b"all patches applied\n")) | |
2671 | return 1 |
|
2670 | return 1 | |
2672 |
|
2671 | |||
2673 | if opts.get(b'first'): |
|
2672 | if opts.get(b'first'): | |
2674 | length = 1 |
|
2673 | length = 1 | |
2675 | else: |
|
2674 | else: | |
2676 | length = None |
|
2675 | length = None | |
2677 | q.qseries( |
|
2676 | q.qseries( | |
2678 | repo, |
|
2677 | repo, | |
2679 | start=start, |
|
2678 | start=start, | |
2680 | length=length, |
|
2679 | length=length, | |
2681 | status=b'U', |
|
2680 | status=b'U', | |
2682 | summary=opts.get(b'summary'), |
|
2681 | summary=opts.get(b'summary'), | |
2683 | ) |
|
2682 | ) | |
2684 |
|
2683 | |||
2685 |
|
2684 | |||
2686 | @command( |
|
2685 | @command( | |
2687 | b"qimport", |
|
2686 | b"qimport", | |
2688 | [ |
|
2687 | [ | |
2689 | (b'e', b'existing', None, _(b'import file in patch directory')), |
|
2688 | (b'e', b'existing', None, _(b'import file in patch directory')), | |
2690 | (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')), |
|
2689 | (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')), | |
2691 | (b'f', b'force', None, _(b'overwrite existing files')), |
|
2690 | (b'f', b'force', None, _(b'overwrite existing files')), | |
2692 | ( |
|
2691 | ( | |
2693 | b'r', |
|
2692 | b'r', | |
2694 | b'rev', |
|
2693 | b'rev', | |
2695 | [], |
|
2694 | [], | |
2696 | _(b'place existing revisions under mq control'), |
|
2695 | _(b'place existing revisions under mq control'), | |
2697 | _(b'REV'), |
|
2696 | _(b'REV'), | |
2698 | ), |
|
2697 | ), | |
2699 | (b'g', b'git', None, _(b'use git extended diff format')), |
|
2698 | (b'g', b'git', None, _(b'use git extended diff format')), | |
2700 | (b'P', b'push', None, _(b'qpush after importing')), |
|
2699 | (b'P', b'push', None, _(b'qpush after importing')), | |
2701 | ], |
|
2700 | ], | |
2702 | _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'), |
|
2701 | _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'), | |
2703 | helpcategory=command.CATEGORY_IMPORT_EXPORT, |
|
2702 | helpcategory=command.CATEGORY_IMPORT_EXPORT, | |
2704 | ) |
|
2703 | ) | |
2705 | def qimport(ui, repo, *filename, **opts): |
|
2704 | def qimport(ui, repo, *filename, **opts): | |
2706 | """import a patch or existing changeset |
|
2705 | """import a patch or existing changeset | |
2707 |
|
2706 | |||
2708 | The patch is inserted into the series after the last applied |
|
2707 | The patch is inserted into the series after the last applied | |
2709 | patch. If no patches have been applied, qimport prepends the patch |
|
2708 | patch. If no patches have been applied, qimport prepends the patch | |
2710 | to the series. |
|
2709 | to the series. | |
2711 |
|
2710 | |||
2712 | The patch will have the same name as its source file unless you |
|
2711 | The patch will have the same name as its source file unless you | |
2713 | give it a new one with -n/--name. |
|
2712 | give it a new one with -n/--name. | |
2714 |
|
2713 | |||
2715 | You can register an existing patch inside the patch directory with |
|
2714 | You can register an existing patch inside the patch directory with | |
2716 | the -e/--existing flag. |
|
2715 | the -e/--existing flag. | |
2717 |
|
2716 | |||
2718 | With -f/--force, an existing patch of the same name will be |
|
2717 | With -f/--force, an existing patch of the same name will be | |
2719 | overwritten. |
|
2718 | overwritten. | |
2720 |
|
2719 | |||
2721 | An existing changeset may be placed under mq control with -r/--rev |
|
2720 | An existing changeset may be placed under mq control with -r/--rev | |
2722 | (e.g. qimport --rev . -n patch will place the current revision |
|
2721 | (e.g. qimport --rev . -n patch will place the current revision | |
2723 | under mq control). With -g/--git, patches imported with --rev will |
|
2722 | under mq control). With -g/--git, patches imported with --rev will | |
2724 | use the git diff format. See the diffs help topic for information |
|
2723 | use the git diff format. See the diffs help topic for information | |
2725 | on why this is important for preserving rename/copy information |
|
2724 | on why this is important for preserving rename/copy information | |
2726 | and permission changes. Use :hg:`qfinish` to remove changesets |
|
2725 | and permission changes. Use :hg:`qfinish` to remove changesets | |
2727 | from mq control. |
|
2726 | from mq control. | |
2728 |
|
2727 | |||
2729 | To import a patch from standard input, pass - as the patch file. |
|
2728 | To import a patch from standard input, pass - as the patch file. | |
2730 | When importing from standard input, a patch name must be specified |
|
2729 | When importing from standard input, a patch name must be specified | |
2731 | using the --name flag. |
|
2730 | using the --name flag. | |
2732 |
|
2731 | |||
2733 | To import an existing patch while renaming it:: |
|
2732 | To import an existing patch while renaming it:: | |
2734 |
|
2733 | |||
2735 | hg qimport -e existing-patch -n new-name |
|
2734 | hg qimport -e existing-patch -n new-name | |
2736 |
|
2735 | |||
2737 | Returns 0 if import succeeded. |
|
2736 | Returns 0 if import succeeded. | |
2738 | """ |
|
2737 | """ | |
2739 | opts = pycompat.byteskwargs(opts) |
|
2738 | opts = pycompat.byteskwargs(opts) | |
2740 | with repo.lock(): # because this may move phase boundaries |
|
2739 | with repo.lock(): # because this may move phase boundaries | |
2741 | q = repo.mq |
|
2740 | q = repo.mq | |
2742 | try: |
|
2741 | try: | |
2743 | imported = q.qimport( |
|
2742 | imported = q.qimport( | |
2744 | repo, |
|
2743 | repo, | |
2745 | filename, |
|
2744 | filename, | |
2746 | patchname=opts.get(b'name'), |
|
2745 | patchname=opts.get(b'name'), | |
2747 | existing=opts.get(b'existing'), |
|
2746 | existing=opts.get(b'existing'), | |
2748 | force=opts.get(b'force'), |
|
2747 | force=opts.get(b'force'), | |
2749 | rev=opts.get(b'rev'), |
|
2748 | rev=opts.get(b'rev'), | |
2750 | git=opts.get(b'git'), |
|
2749 | git=opts.get(b'git'), | |
2751 | ) |
|
2750 | ) | |
2752 | finally: |
|
2751 | finally: | |
2753 | q.savedirty() |
|
2752 | q.savedirty() | |
2754 |
|
2753 | |||
2755 | if imported and opts.get(b'push') and not opts.get(b'rev'): |
|
2754 | if imported and opts.get(b'push') and not opts.get(b'rev'): | |
2756 | return q.push(repo, imported[-1]) |
|
2755 | return q.push(repo, imported[-1]) | |
2757 | return 0 |
|
2756 | return 0 | |
2758 |
|
2757 | |||
2759 |
|
2758 | |||
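
The docstring's invocations can also be driven from Python; a hedged
sketch using the standard subprocess module (assumes an hg with mq
enabled on PATH and a repository in the current directory; the patch
names are made up):

import subprocess

# Mirrors the docstring's 'hg qimport -e existing-patch -n new-name'.
subprocess.run(['hg', 'qimport', '-e', 'existing-patch', '-n', 'new-name'],
               check=True)

# Place the current revision under mq control, in git diff format.
subprocess.run(['hg', 'qimport', '--rev', '.', '-n', 'patch', '-g'],
               check=True)
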
2760 | def qinit(ui, repo, create): |
|
2759 | def qinit(ui, repo, create): | |
2761 | """initialize a new queue repository |
|
2760 | """initialize a new queue repository | |
2762 |
|
2761 | |||
2763 | This command also creates a series file for ordering patches, and |
|
2762 | This command also creates a series file for ordering patches, and | |
2764 | an mq-specific .hgignore file in the queue repository, to exclude |
|
2763 | an mq-specific .hgignore file in the queue repository, to exclude | |
2765 | the status and guards files (these contain mostly transient state). |
|
2764 | the status and guards files (these contain mostly transient state). | |
2766 |
|
2765 | |||
2767 | Returns 0 if initialization succeeded.""" |
|
2766 | Returns 0 if initialization succeeded.""" | |
2768 | q = repo.mq |
|
2767 | q = repo.mq | |
2769 | r = q.init(repo, create) |
|
2768 | r = q.init(repo, create) | |
2770 | q.savedirty() |
|
2769 | q.savedirty() | |
2771 | if r: |
|
2770 | if r: | |
2772 | if not os.path.exists(r.wjoin(b'.hgignore')): |
|
2771 | if not os.path.exists(r.wjoin(b'.hgignore')): | |
2773 | fp = r.wvfs(b'.hgignore', b'w') |
|
2772 | fp = r.wvfs(b'.hgignore', b'w') | |
2774 | fp.write(b'^\\.hg\n') |
|
2773 | fp.write(b'^\\.hg\n') | |
2775 | fp.write(b'^\\.mq\n') |
|
2774 | fp.write(b'^\\.mq\n') | |
2776 | fp.write(b'syntax: glob\n') |
|
2775 | fp.write(b'syntax: glob\n') | |
2777 | fp.write(b'status\n') |
|
2776 | fp.write(b'status\n') | |
2778 | fp.write(b'guards\n') |
|
2777 | fp.write(b'guards\n') | |
2779 | fp.close() |
|
2778 | fp.close() | |
2780 | if not os.path.exists(r.wjoin(b'series')): |
|
2779 | if not os.path.exists(r.wjoin(b'series')): | |
2781 | r.wvfs(b'series', b'w').close() |
|
2780 | r.wvfs(b'series', b'w').close() | |
2782 | r[None].add([b'.hgignore', b'series']) |
|
2781 | r[None].add([b'.hgignore', b'series']) | |
2783 | commands.add(ui, r) |
|
2782 | commands.add(ui, r) | |
2784 | return 0 |
|
2783 | return 0 | |
2785 |
|
2784 | |||
2786 |
|
2785 | |||
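
The .hgignore that qinit() writes mixes one regexp section and one
glob section; reproducing the file verbatim makes that layering easier
to see (a sketch with an illustrative output path, not mq itself):

ignore = (
    b'^\\.hg\n'        # regexp syntax: the queue repo's own metadata
    b'^\\.mq\n'
    b'syntax: glob\n'  # switch syntax for the rest of the file
    b'status\n'        # transient state files, per the docstring
    b'guards\n'
)
with open('.hgignore-sketch', 'wb') as fp:
    fp.write(ignore)
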
2787 | @command( |
|
2786 | @command( | |
2788 | b"qinit", |
|
2787 | b"qinit", | |
2789 | [(b'c', b'create-repo', None, _(b'create queue repository'))], |
|
2788 | [(b'c', b'create-repo', None, _(b'create queue repository'))], | |
2790 | _(b'hg qinit [-c]'), |
|
2789 | _(b'hg qinit [-c]'), | |
2791 | helpcategory=command.CATEGORY_REPO_CREATION, |
|
2790 | helpcategory=command.CATEGORY_REPO_CREATION, | |
2792 | helpbasic=True, |
|
2791 | helpbasic=True, | |
2793 | ) |
|
2792 | ) | |
2794 | def init(ui, repo, **opts): |
|
2793 | def init(ui, repo, **opts): | |
2795 | """init a new queue repository (DEPRECATED) |
|
2794 | """init a new queue repository (DEPRECATED) | |
2796 |
|
2795 | |||
2797 | The queue repository is unversioned by default. If |
|
2796 | The queue repository is unversioned by default. If | |
2798 | -c/--create-repo is specified, qinit will create a separate nested |
|
2797 | -c/--create-repo is specified, qinit will create a separate nested | |
2799 | repository for patches (qinit -c may also be run later to convert |
|
2798 | repository for patches (qinit -c may also be run later to convert | |
2800 | an unversioned patch repository into a versioned one). You can use |
|
2799 | an unversioned patch repository into a versioned one). You can use | |
2801 | qcommit to commit changes to this queue repository. |
|
2800 | qcommit to commit changes to this queue repository. | |
2802 |
|
2801 | |||
2803 | This command is deprecated. Without -c, it's implied by other relevant |
|
2802 | This command is deprecated. Without -c, it's implied by other relevant | |
2804 | commands. With -c, use :hg:`init --mq` instead.""" |
|
2803 | commands. With -c, use :hg:`init --mq` instead.""" | |
2805 | return qinit(ui, repo, create=opts.get('create_repo')) |
|
2804 | return qinit(ui, repo, create=opts.get('create_repo')) | |
2806 |
|
2805 | |||
2807 |
|
2806 | |||
2808 | @command( |
|
2807 | @command( | |
2809 | b"qclone", |
|
2808 | b"qclone", | |
2810 | [ |
|
2809 | [ | |
2811 | (b'', b'pull', None, _(b'use pull protocol to copy metadata')), |
|
2810 | (b'', b'pull', None, _(b'use pull protocol to copy metadata')), | |
2812 | ( |
|
2811 | ( | |
2813 | b'U', |
|
2812 | b'U', | |
2814 | b'noupdate', |
|
2813 | b'noupdate', | |
2815 | None, |
|
2814 | None, | |
2816 | _(b'do not update the new working directories'), |
|
2815 | _(b'do not update the new working directories'), | |
2817 | ), |
|
2816 | ), | |
2818 | ( |
|
2817 | ( | |
2819 | b'', |
|
2818 | b'', | |
2820 | b'uncompressed', |
|
2819 | b'uncompressed', | |
2821 | None, |
|
2820 | None, | |
2822 | _(b'use uncompressed transfer (fast over LAN)'), |
|
2821 | _(b'use uncompressed transfer (fast over LAN)'), | |
2823 | ), |
|
2822 | ), | |
2824 | ( |
|
2823 | ( | |
2825 | b'p', |
|
2824 | b'p', | |
2826 | b'patches', |
|
2825 | b'patches', | |
2827 | b'', |
|
2826 | b'', | |
2828 | _(b'location of source patch repository'), |
|
2827 | _(b'location of source patch repository'), | |
2829 | _(b'REPO'), |
|
2828 | _(b'REPO'), | |
2830 | ), |
|
2829 | ), | |
2831 | ] |
|
2830 | ] | |
2832 | + cmdutil.remoteopts, |
|
2831 | + cmdutil.remoteopts, | |
2833 | _(b'hg qclone [OPTION]... SOURCE [DEST]'), |
|
2832 | _(b'hg qclone [OPTION]... SOURCE [DEST]'), | |
2834 | helpcategory=command.CATEGORY_REPO_CREATION, |
|
2833 | helpcategory=command.CATEGORY_REPO_CREATION, | |
2835 | norepo=True, |
|
2834 | norepo=True, | |
2836 | ) |
|
2835 | ) | |
2837 | def clone(ui, source, dest=None, **opts): |
|
2836 | def clone(ui, source, dest=None, **opts): | |
2838 | """clone main and patch repository at same time |
|
2837 | """clone main and patch repository at same time | |
2839 |
|
2838 | |||
2840 | If the source is local, the destination will have no patches |
|
2839 | If the source is local, the destination will have no patches | |
2841 | applied. If the source is remote, this command cannot check |
|
2840 | applied. If the source is remote, this command cannot check | |
2842 | whether patches are applied in the source, so it cannot guarantee |
|
2841 | whether patches are applied in the source, so it cannot guarantee | |
2843 | that no patches are applied in the destination. If you clone a |
|
2842 | that no patches are applied in the destination. If you clone a | |
2844 | remote repository, first make sure that it has no patches applied. |
|
2843 | remote repository, first make sure that it has no patches applied. | |
2845 |
|
2844 | |||
2846 | The source patch repository is looked for in <src>/.hg/patches |
|
2845 | The source patch repository is looked for in <src>/.hg/patches | |
2847 | by default. Use -p <url> to change it. |
|
2846 | by default. Use -p <url> to change it. | |
2848 |
|
2847 | |||
2849 | The patch directory must be a nested Mercurial repository, as |
|
2848 | The patch directory must be a nested Mercurial repository, as | |
2850 | would be created by :hg:`init --mq`. |
|
2849 | would be created by :hg:`init --mq`. | |
2851 |
|
2850 | |||
2852 | Return 0 on success. |
|
2851 | Return 0 on success. | |
2853 | """ |
|
2852 | """ | |
2854 | opts = pycompat.byteskwargs(opts) |
|
2853 | opts = pycompat.byteskwargs(opts) | |
2855 |
|
2854 | |||
2856 | def patchdir(repo): |
|
2855 | def patchdir(repo): | |
2857 | """compute a patch repo url from a repo object""" |
|
2856 | """compute a patch repo url from a repo object""" | |
2858 | url = repo.url() |
|
2857 | url = repo.url() | |
2859 | if url.endswith(b'/'): |
|
2858 | if url.endswith(b'/'): | |
2860 | url = url[:-1] |
|
2859 | url = url[:-1] | |
2861 | return url + b'/.hg/patches' |
|
2860 | return url + b'/.hg/patches' | |
2862 |
|
2861 | |||
2863 | # main repo (destination and sources) |
|
2862 | # main repo (destination and sources) | |
2864 | if dest is None: |
|
2863 | if dest is None: | |
2865 | dest = hg.defaultdest(source) |
|
2864 | dest = hg.defaultdest(source) | |
2866 | __, source_path, __ = urlutil.get_clone_path(ui, source) |
|
2865 | __, source_path, __ = urlutil.get_clone_path(ui, source) | |
2867 | sr = hg.peer(ui, opts, source_path) |
|
2866 | sr = hg.peer(ui, opts, source_path) | |
2868 |
|
2867 | |||
2869 | # patches repo (source only) |
|
2868 | # patches repo (source only) | |
2870 | if opts.get(b'patches'): |
|
2869 | if opts.get(b'patches'): | |
2871 | __, patchespath, __ = urlutil.get_clone_path(ui, opts.get(b'patches')) |
|
2870 | __, patchespath, __ = urlutil.get_clone_path(ui, opts.get(b'patches')) | |
2872 | else: |
|
2871 | else: | |
2873 | patchespath = patchdir(sr) |
|
2872 | patchespath = patchdir(sr) | |
2874 | try: |
|
2873 | try: | |
2875 | hg.peer(ui, opts, patchespath) |
|
2874 | hg.peer(ui, opts, patchespath) | |
2876 | except error.RepoError: |
|
2875 | except error.RepoError: | |
2877 | raise error.Abort( |
|
2876 | raise error.Abort( | |
2878 | _(b'versioned patch repository not found (see init --mq)') |
|
2877 | _(b'versioned patch repository not found (see init --mq)') | |
2879 | ) |
|
2878 | ) | |
2880 | qbase, destrev = None, None |
|
2879 | qbase, destrev = None, None | |
2881 | if sr.local(): |
|
2880 | if sr.local(): | |
2882 | repo = sr.local() |
|
2881 | repo = sr.local() | |
2883 | if repo.mq.applied and repo[qbase].phase() != phases.secret: |
|
2882 | if repo.mq.applied and repo[qbase].phase() != phases.secret: | |
2884 | qbase = repo.mq.applied[0].node |
|
2883 | qbase = repo.mq.applied[0].node | |
2885 | if not hg.islocal(dest): |
|
2884 | if not hg.islocal(dest): | |
2886 | heads = set(repo.heads()) |
|
2885 | heads = set(repo.heads()) | |
2887 | destrev = list(heads.difference(repo.heads(qbase))) |
|
2886 | destrev = list(heads.difference(repo.heads(qbase))) | |
2888 | destrev.append(repo.changelog.parents(qbase)[0]) |
|
2887 | destrev.append(repo.changelog.parents(qbase)[0]) | |
2889 | elif sr.capable(b'lookup'): |
|
2888 | elif sr.capable(b'lookup'): | |
2890 | try: |
|
2889 | try: | |
2891 | qbase = sr.lookup(b'qbase') |
|
2890 | qbase = sr.lookup(b'qbase') | |
2892 | except error.RepoError: |
|
2891 | except error.RepoError: | |
2893 | pass |
|
2892 | pass | |
2894 |
|
2893 | |||
2895 | ui.note(_(b'cloning main repository\n')) |
|
2894 | ui.note(_(b'cloning main repository\n')) | |
2896 | sr, dr = hg.clone( |
|
2895 | sr, dr = hg.clone( | |
2897 | ui, |
|
2896 | ui, | |
2898 | opts, |
|
2897 | opts, | |
2899 | sr.url(), |
|
2898 | sr.url(), | |
2900 | dest, |
|
2899 | dest, | |
2901 | pull=opts.get(b'pull'), |
|
2900 | pull=opts.get(b'pull'), | |
2902 | revs=destrev, |
|
2901 | revs=destrev, | |
2903 | update=False, |
|
2902 | update=False, | |
2904 | stream=opts.get(b'uncompressed'), |
|
2903 | stream=opts.get(b'uncompressed'), | |
2905 | ) |
|
2904 | ) | |
2906 |
|
2905 | |||
2907 | ui.note(_(b'cloning patch repository\n')) |
|
2906 | ui.note(_(b'cloning patch repository\n')) | |
2908 | hg.clone( |
|
2907 | hg.clone( | |
2909 | ui, |
|
2908 | ui, | |
2910 | opts, |
|
2909 | opts, | |
2911 | opts.get(b'patches') or patchdir(sr), |
|
2910 | opts.get(b'patches') or patchdir(sr), | |
2912 | patchdir(dr), |
|
2911 | patchdir(dr), | |
2913 | pull=opts.get(b'pull'), |
|
2912 | pull=opts.get(b'pull'), | |
2914 | update=not opts.get(b'noupdate'), |
|
2913 | update=not opts.get(b'noupdate'), | |
2915 | stream=opts.get(b'uncompressed'), |
|
2914 | stream=opts.get(b'uncompressed'), | |
2916 | ) |
|
2915 | ) | |
2917 |
|
2916 | |||
2918 | if dr.local(): |
|
2917 | if dr.local(): | |
2919 | repo = dr.local() |
|
2918 | repo = dr.local() | |
2920 | if qbase: |
|
2919 | if qbase: | |
2921 | ui.note( |
|
2920 | ui.note( | |
2922 | _( |
|
2921 | _( | |
2923 | b'stripping applied patches from destination ' |
|
2922 | b'stripping applied patches from destination ' | |
2924 | b'repository\n' |
|
2923 | b'repository\n' | |
2925 | ) |
|
2924 | ) | |
2926 | ) |
|
2925 | ) | |
2927 | strip(ui, repo, [qbase], update=False, backup=None) |
|
2926 | strip(ui, repo, [qbase], update=False, backup=None) | |
2928 | if not opts.get(b'noupdate'): |
|
2927 | if not opts.get(b'noupdate'): | |
2929 | ui.note(_(b'updating destination repository\n')) |
|
2928 | ui.note(_(b'updating destination repository\n')) | |
2930 | hg.update(repo, repo.changelog.tip()) |
|
2929 | hg.update(repo, repo.changelog.tip()) | |
2931 |
|
2930 | |||
2932 |
|
2931 | |||
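
patchdir() above is a pure string transform, so it can be pinned down
in isolation. A stand-alone restatement with one usage (not an import
of the real nested helper):

def patchdir_sketch(url):
    # Same normalization as patchdir() above: strip one trailing
    # slash, then append the conventional patch repository path.
    if url.endswith(b'/'):
        url = url[:-1]
    return url + b'/.hg/patches'

print(patchdir_sketch(b'https://example.com/repo/'))
# b'https://example.com/repo/.hg/patches'
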
2933 | @command( |
|
2932 | @command( | |
2934 | b"qcommit|qci", |
|
2933 | b"qcommit|qci", | |
2935 | commands.table[b"commit|ci"][1], |
|
2934 | commands.table[b"commit|ci"][1], | |
2936 | _(b'hg qcommit [OPTION]... [FILE]...'), |
|
2935 | _(b'hg qcommit [OPTION]... [FILE]...'), | |
2937 | helpcategory=command.CATEGORY_COMMITTING, |
|
2936 | helpcategory=command.CATEGORY_COMMITTING, | |
2938 | inferrepo=True, |
|
2937 | inferrepo=True, | |
2939 | ) |
|
2938 | ) | |
2940 | def commit(ui, repo, *pats, **opts): |
|
2939 | def commit(ui, repo, *pats, **opts): | |
2941 | """commit changes in the queue repository (DEPRECATED) |
|
2940 | """commit changes in the queue repository (DEPRECATED) | |
2942 |
|
2941 | |||
2943 | This command is deprecated; use :hg:`commit --mq` instead.""" |
|
2942 | This command is deprecated; use :hg:`commit --mq` instead.""" | |
2944 | q = repo.mq |
|
2943 | q = repo.mq | |
2945 | r = q.qrepo() |
|
2944 | r = q.qrepo() | |
2946 | if not r: |
|
2945 | if not r: | |
2947 | raise error.Abort(b'no queue repository') |
|
2946 | raise error.Abort(b'no queue repository') | |
2948 | commands.commit(r.ui, r, *pats, **opts) |
|
2947 | commands.commit(r.ui, r, *pats, **opts) | |
2949 |
|
2948 | |||
2950 |
|
2949 | |||
2951 | @command( |
|
2950 | @command( | |
2952 | b"qseries", |
|
2951 | b"qseries", | |
2953 | [ |
|
2952 | [ | |
2954 | (b'm', b'missing', None, _(b'print patches not in series')), |
|
2953 | (b'm', b'missing', None, _(b'print patches not in series')), | |
2955 | ] |
|
2954 | ] | |
2956 | + seriesopts, |
|
2955 | + seriesopts, | |
2957 | _(b'hg qseries [-ms]'), |
|
2956 | _(b'hg qseries [-ms]'), | |
2958 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2957 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | |
2959 | ) |
|
2958 | ) | |
2960 | def series(ui, repo, **opts): |
|
2959 | def series(ui, repo, **opts): | |
2961 | """print the entire series file |
|
2960 | """print the entire series file | |
2962 |
|
2961 | |||
2963 | Returns 0 on success.""" |
|
2962 | Returns 0 on success.""" | |
2964 | repo.mq.qseries( |
|
2963 | repo.mq.qseries( | |
2965 | repo, missing=opts.get('missing'), summary=opts.get('summary') |
|
2964 | repo, missing=opts.get('missing'), summary=opts.get('summary') | |
2966 | ) |
|
2965 | ) | |
2967 | return 0 |
|
2966 | return 0 | |
2968 |
|
2967 | |||
2969 |
|
2968 | |||
2970 | @command( |
|
2969 | @command( | |
2971 | b"qtop", |
|
2970 | b"qtop", | |
2972 | seriesopts, |
|
2971 | seriesopts, | |
2973 | _(b'hg qtop [-s]'), |
|
2972 | _(b'hg qtop [-s]'), | |
2974 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2973 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | |
2975 | ) |
|
2974 | ) | |
2976 | def top(ui, repo, **opts): |
|
2975 | def top(ui, repo, **opts): | |
2977 | """print the name of the current patch |
|
2976 | """print the name of the current patch | |
2978 |
|
2977 | |||
2979 | Returns 0 on success.""" |
|
2978 | Returns 0 on success.""" | |
2980 | q = repo.mq |
|
2979 | q = repo.mq | |
2981 | if q.applied: |
|
2980 | if q.applied: | |
2982 | t = q.seriesend(True) |
|
2981 | t = q.seriesend(True) | |
2983 | else: |
|
2982 | else: | |
2984 | t = 0 |
|
2983 | t = 0 | |
2985 |
|
2984 | |||
2986 | if t: |
|
2985 | if t: | |
2987 | q.qseries( |
|
2986 | q.qseries( | |
2988 | repo, |
|
2987 | repo, | |
2989 | start=t - 1, |
|
2988 | start=t - 1, | |
2990 | length=1, |
|
2989 | length=1, | |
2991 | status=b'A', |
|
2990 | status=b'A', | |
2992 | summary=opts.get('summary'), |
|
2991 | summary=opts.get('summary'), | |
2993 | ) |
|
2992 | ) | |
2994 | else: |
|
2993 | else: | |
2995 | ui.write(_(b"no patches applied\n")) |
|
2994 | ui.write(_(b"no patches applied\n")) | |
2996 | return 1 |
|
2995 | return 1 | |
2997 |
|
2996 | |||
2998 |
|
2997 | |||
2999 | @command( |
|
2998 | @command( | |
3000 | b"qnext", |
|
2999 | b"qnext", | |
3001 | seriesopts, |
|
3000 | seriesopts, | |
3002 | _(b'hg qnext [-s]'), |
|
3001 | _(b'hg qnext [-s]'), | |
3003 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3002 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | |
3004 | ) |
|
3003 | ) | |
3005 | def next(ui, repo, **opts): |
|
3004 | def next(ui, repo, **opts): | |
3006 | """print the name of the next pushable patch |
|
3005 | """print the name of the next pushable patch | |
3007 |
|
3006 | |||
3008 | Returns 0 on success.""" |
|
3007 | Returns 0 on success.""" | |
3009 | q = repo.mq |
|
3008 | q = repo.mq | |
3010 | end = q.seriesend() |
|
3009 | end = q.seriesend() | |
3011 | if end == len(q.series): |
|
3010 | if end == len(q.series): | |
3012 | ui.write(_(b"all patches applied\n")) |
|
3011 | ui.write(_(b"all patches applied\n")) | |
3013 | return 1 |
|
3012 | return 1 | |
3014 | q.qseries(repo, start=end, length=1, summary=opts.get('summary')) |
|
3013 | q.qseries(repo, start=end, length=1, summary=opts.get('summary')) | |
3015 |
|
3014 | |||
3016 |
|
3015 | |||
3017 | @command( |
|
3016 | @command( | |
3018 | b"qprev", |
|
3017 | b"qprev", | |
3019 | seriesopts, |
|
3018 | seriesopts, | |
3020 | _(b'hg qprev [-s]'), |
|
3019 | _(b'hg qprev [-s]'), | |
3021 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3020 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | |
3022 | ) |
|
3021 | ) | |
3023 | def prev(ui, repo, **opts): |
|
3022 | def prev(ui, repo, **opts): | |
3024 | """print the name of the preceding applied patch |
|
3023 | """print the name of the preceding applied patch | |
3025 |
|
3024 | |||
3026 | Returns 0 on success.""" |
|
3025 | Returns 0 on success.""" | |
3027 | q = repo.mq |
|
3026 | q = repo.mq | |
3028 | l = len(q.applied) |
|
3027 | l = len(q.applied) | |
3029 | if l == 1: |
|
3028 | if l == 1: | |
3030 | ui.write(_(b"only one patch applied\n")) |
|
3029 | ui.write(_(b"only one patch applied\n")) | |
3031 | return 1 |
|
3030 | return 1 | |
3032 | if not l: |
|
3031 | if not l: | |
3033 | ui.write(_(b"no patches applied\n")) |
|
3032 | ui.write(_(b"no patches applied\n")) | |
3034 | return 1 |
|
3033 | return 1 | |
3035 | idx = q.series.index(q.applied[-2].name) |
|
3034 | idx = q.series.index(q.applied[-2].name) | |
3036 | q.qseries( |
|
3035 | q.qseries( | |
3037 | repo, start=idx, length=1, status=b'A', summary=opts.get('summary') |
|
3036 | repo, start=idx, length=1, status=b'A', summary=opts.get('summary') | |
3038 | ) |
|
3037 | ) | |
3039 |
|
3038 | |||
3040 |
|
3039 | |||
3041 | def setupheaderopts(ui, opts): |
|
3040 | def setupheaderopts(ui, opts): | |
3042 | if not opts.get(b'user') and opts.get(b'currentuser'): |
|
3041 | if not opts.get(b'user') and opts.get(b'currentuser'): | |
3043 | opts[b'user'] = ui.username() |
|
3042 | opts[b'user'] = ui.username() | |
3044 | if not opts.get(b'date') and opts.get(b'currentdate'): |
|
3043 | if not opts.get(b'date') and opts.get(b'currentdate'): | |
3045 | opts[b'date'] = b"%d %d" % dateutil.makedate() |
|
3044 | opts[b'date'] = b"%d %d" % dateutil.makedate() | |
3046 |
|
3045 | |||
3047 |
|
3046 | |||
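
setupheaderopts() above fills in defaults only when -U/-D were passed
without explicit values, and the date lands as Mercurial's internal
'<unixtime> <tzoffset>' pair. A dependency-free approximation (using
time.timezone as a stand-in for dateutil.makedate() is an assumption
made for illustration; the real offset handling differs around DST):

import time

def setupheader_sketch(opts, username):
    if not opts.get(b'user') and opts.get(b'currentuser'):
        opts[b'user'] = username
    if not opts.get(b'date') and opts.get(b'currentdate'):
        # Rough stand-in for dateutil.makedate().
        opts[b'date'] = b'%d %d' % (int(time.time()), time.timezone)
    return opts

print(setupheader_sketch({b'currentdate': True}, b'alice'))
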
3048 | @command( |
|
3047 | @command( | |
3049 | b"qnew", |
|
3048 | b"qnew", | |
3050 | [ |
|
3049 | [ | |
3051 | (b'e', b'edit', None, _(b'invoke editor on commit messages')), |
|
3050 | (b'e', b'edit', None, _(b'invoke editor on commit messages')), | |
3052 | (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')), |
|
3051 | (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')), | |
3053 | (b'g', b'git', None, _(b'use git extended diff format')), |
|
3052 | (b'g', b'git', None, _(b'use git extended diff format')), | |
3054 | (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')), |
|
3053 | (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')), | |
3055 | (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')), |
|
3054 | (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')), | |
3056 | (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')), |
|
3055 | (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')), | |
3057 | (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')), |
|
3056 | (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')), | |
3058 | ] |
|
3057 | ] | |
3059 | + cmdutil.walkopts |
|
3058 | + cmdutil.walkopts | |
3060 | + cmdutil.commitopts, |
|
3059 | + cmdutil.commitopts, | |
3061 | _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'), |
|
3060 | _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'), | |
3062 | helpcategory=command.CATEGORY_COMMITTING, |
|
3061 | helpcategory=command.CATEGORY_COMMITTING, | |
3063 | helpbasic=True, |
|
3062 | helpbasic=True, | |
3064 | inferrepo=True, |
|
3063 | inferrepo=True, | |
3065 | ) |
|
3064 | ) | |
3066 | def new(ui, repo, patch, *args, **opts): |
|
3065 | def new(ui, repo, patch, *args, **opts): | |
3067 | """create a new patch |
|
3066 | """create a new patch | |
3068 |
|
3067 | |||
3069 | qnew creates a new patch on top of the currently-applied patch (if |
|
3068 | qnew creates a new patch on top of the currently-applied patch (if | |
3070 | any). The patch will be initialized with any outstanding changes |
|
3069 | any). The patch will be initialized with any outstanding changes | |
3071 | in the working directory. You may also use -I/--include, |
|
3070 | in the working directory. You may also use -I/--include, | |
3072 | -X/--exclude, and/or a list of files after the patch name to add |
|
3071 | -X/--exclude, and/or a list of files after the patch name to add | |
3073 | only changes to matching files to the new patch, leaving the rest |
|
3072 | only changes to matching files to the new patch, leaving the rest | |
3074 | as uncommitted modifications. |
|
3073 | as uncommitted modifications. | |
3075 |
|
3074 | |||
3076 | -u/--user and -d/--date can be used to set the (given) user and |
|
3075 | -u/--user and -d/--date can be used to set the (given) user and | |
3077 | date, respectively. -U/--currentuser and -D/--currentdate set user |
|
3076 | date, respectively. -U/--currentuser and -D/--currentdate set user | |
3078 | to current user and date to current date. |
|
3077 | to current user and date to current date. | |
3079 |
|
3078 | |||
3080 | -e/--edit, -m/--message or -l/--logfile set the patch header as |
|
3079 | -e/--edit, -m/--message or -l/--logfile set the patch header as | |
3081 | well as the commit message. If none is specified, the header is |
|
3080 | well as the commit message. If none is specified, the header is | |
3082 | empty and the commit message is '[mq]: PATCH'. |
|
3081 | empty and the commit message is '[mq]: PATCH'. | |
3083 |
|
3082 | |||
3084 | Use the -g/--git option to keep the patch in the git extended diff |
|
3083 | Use the -g/--git option to keep the patch in the git extended diff | |
3085 | format. Read the diffs help topic for more information on why this |
|
3084 | format. Read the diffs help topic for more information on why this | |
3086 | is important for preserving permission changes and copy/rename |
|
3085 | is important for preserving permission changes and copy/rename | |
3087 | information. |
|
3086 | information. | |
3088 |
|
3087 | |||
3089 | Returns 0 on successful creation of a new patch. |
|
3088 | Returns 0 on successful creation of a new patch. | |
3090 | """ |
|
3089 | """ | |
3091 | opts = pycompat.byteskwargs(opts) |
|
3090 | opts = pycompat.byteskwargs(opts) | |
3092 | msg = cmdutil.logmessage(ui, opts) |
|
3091 | msg = cmdutil.logmessage(ui, opts) | |
3093 | q = repo.mq |
|
3092 | q = repo.mq | |
3094 | opts[b'msg'] = msg |
|
3093 | opts[b'msg'] = msg | |
3095 | setupheaderopts(ui, opts) |
|
3094 | setupheaderopts(ui, opts) | |
3096 | q.new(repo, patch, *args, **pycompat.strkwargs(opts)) |
|
3095 | q.new(repo, patch, *args, **pycompat.strkwargs(opts)) | |
3097 | q.savedirty() |
|
3096 | q.savedirty() | |
3098 | return 0 |
|
3097 | return 0 | |
3099 |
|
3098 | |||
3100 |
|
3099 | |||
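
A hedged driver for the workflow the docstring describes, again via
subprocess (assumes hg with mq enabled; the patch and file names are
made up for illustration):

import subprocess

# Start a new patch seeded only from outstanding changes to one file,
# with an explicit commit message / patch header.
subprocess.run(['hg', 'qnew', '-m', 'frob the widget',
                'frob-widget.patch', 'widget.py'],
               check=True)
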
3101 | @command( |
|
3100 | @command( | |
3102 | b"qrefresh", |
|
3101 | b"qrefresh", | |
3103 | [ |
|
3102 | [ | |
3104 | (b'e', b'edit', None, _(b'invoke editor on commit messages')), |
|
3103 | (b'e', b'edit', None, _(b'invoke editor on commit messages')), | |
3105 | (b'g', b'git', None, _(b'use git extended diff format')), |
|
3104 | (b'g', b'git', None, _(b'use git extended diff format')), | |
3106 | ( |
|
3105 | ( | |
3107 | b's', |
|
3106 | b's', | |
3108 | b'short', |
|
3107 | b'short', | |
3109 | None, |
|
3108 | None, | |
3110 | _(b'refresh only files already in the patch and specified files'), |
|
3109 | _(b'refresh only files already in the patch and specified files'), | |
3111 | ), |
|
3110 | ), | |
3112 | ( |
|
3111 | ( | |
3113 | b'U', |
|
3112 | b'U', | |
3114 | b'currentuser', |
|
3113 | b'currentuser', | |
3115 | None, |
|
3114 | None, | |
3116 | _(b'add/update author field in patch with current user'), |
|
3115 | _(b'add/update author field in patch with current user'), | |
3117 | ), |
|
3116 | ), | |
3118 | ( |
|
3117 | ( | |
3119 | b'u', |
|
3118 | b'u', | |
3120 | b'user', |
|
3119 | b'user', | |
3121 | b'', |
|
3120 | b'', | |
3122 | _(b'add/update author field in patch with given user'), |
|
3121 | _(b'add/update author field in patch with given user'), | |
3123 | _(b'USER'), |
|
3122 | _(b'USER'), | |
3124 | ), |
|
3123 | ), | |
3125 | ( |
|
3124 | ( | |
3126 | b'D', |
|
3125 | b'D', | |
3127 | b'currentdate', |
|
3126 | b'currentdate', | |
3128 | None, |
|
3127 | None, | |
3129 | _(b'add/update date field in patch with current date'), |
|
3128 | _(b'add/update date field in patch with current date'), | |
3130 | ), |
|
3129 | ), | |
3131 | ( |
|
3130 | ( | |
3132 | b'd', |
|
3131 | b'd', | |
3133 | b'date', |
|
3132 | b'date', | |
3134 | b'', |
|
3133 | b'', | |
3135 | _(b'add/update date field in patch with given date'), |
|
3134 | _(b'add/update date field in patch with given date'), | |
3136 | _(b'DATE'), |
|
3135 | _(b'DATE'), | |
3137 | ), |
|
3136 | ), | |
3138 | ] |
|
3137 | ] | |
3139 | + cmdutil.walkopts |
|
3138 | + cmdutil.walkopts | |
3140 | + cmdutil.commitopts, |
|
3139 | + cmdutil.commitopts, | |
3141 | _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'), |
|
3140 | _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'), | |
3142 | helpcategory=command.CATEGORY_COMMITTING, |
|
3141 | helpcategory=command.CATEGORY_COMMITTING, | |
3143 | helpbasic=True, |
|
3142 | helpbasic=True, | |
3144 | inferrepo=True, |
|
3143 | inferrepo=True, | |
3145 | ) |
|
3144 | ) | |
3146 | def refresh(ui, repo, *pats, **opts): |
|
3145 | def refresh(ui, repo, *pats, **opts): | |
3147 | """update the current patch |
|
3146 | """update the current patch | |
3148 |
|
3147 | |||
3149 | If any file patterns are provided, the refreshed patch will |
|
3148 | If any file patterns are provided, the refreshed patch will | |
3150 | contain only the modifications that match those patterns; the |
|
3149 | contain only the modifications that match those patterns; the | |
3151 | remaining modifications will remain in the working directory. |
|
3150 | remaining modifications will remain in the working directory. | |
3152 |
|
3151 | |||
3153 | If -s/--short is specified, files currently included in the patch |
|
3152 | If -s/--short is specified, files currently included in the patch | |
3154 | will be refreshed just like matched files and remain in the patch. |
|
3153 | will be refreshed just like matched files and remain in the patch. | |
3155 |
|
3154 | |||
3156 | If -e/--edit is specified, Mercurial will start your configured editor for |
|
3155 | If -e/--edit is specified, Mercurial will start your configured editor for | |
3157 | you to enter a message. In case qrefresh fails, you will find a backup of |
|
3156 | you to enter a message. In case qrefresh fails, you will find a backup of | |
3158 | your message in ``.hg/last-message.txt``. |
|
3157 | your message in ``.hg/last-message.txt``. | |
3159 |
|
3158 | |||
3160 | hg add/remove/copy/rename work as usual, though you might want to |
|
3159 | hg add/remove/copy/rename work as usual, though you might want to | |
3161 | use git-style patches (-g/--git or [diff] git=1) to track copies |
|
3160 | use git-style patches (-g/--git or [diff] git=1) to track copies | |
3162 | and renames. See the diffs help topic for more information on the |
|
3161 | and renames. See the diffs help topic for more information on the | |
3163 | git diff format. |
|
3162 | git diff format. | |
3164 |
|
3163 | |||
3165 | Returns 0 on success. |
|
3164 | Returns 0 on success. | |
3166 | """ |
|
3165 | """ | |
3167 | opts = pycompat.byteskwargs(opts) |
|
3166 | opts = pycompat.byteskwargs(opts) | |
3168 | q = repo.mq |
|
3167 | q = repo.mq | |
3169 | message = cmdutil.logmessage(ui, opts) |
|
3168 | message = cmdutil.logmessage(ui, opts) | |
3170 | setupheaderopts(ui, opts) |
|
3169 | setupheaderopts(ui, opts) | |
3171 | with repo.wlock(): |
|
3170 | with repo.wlock(): | |
3172 | ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts)) |
|
3171 | ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts)) | |
3173 | q.savedirty() |
|
3172 | q.savedirty() | |
3174 | return ret |
|
3173 | return ret | |
3175 |
|
3174 | |||
3176 |
|
3175 | |||
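
And the matching refresh step as an illustrative call (per the option
table above, -s/--short limits the refresh to files already in the
patch plus any named explicitly):

import subprocess

# Fold the latest edits to widget.py back into the current patch
# without sweeping up unrelated working-directory modifications.
subprocess.run(['hg', 'qrefresh', '-s', 'widget.py'], check=True)
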
3177 | @command( |
|
3176 | @command( | |
3178 | b"qdiff", |
|
3177 | b"qdiff", | |
3179 | cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts, |
|
3178 | cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts, | |
3180 | _(b'hg qdiff [OPTION]... [FILE]...'), |
|
3179 | _(b'hg qdiff [OPTION]... [FILE]...'), | |
3181 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
3180 | helpcategory=command.CATEGORY_FILE_CONTENTS, | |
3182 | helpbasic=True, |
|
3181 | helpbasic=True, | |
3183 | inferrepo=True, |
|
3182 | inferrepo=True, | |
3184 | ) |
|
3183 | ) | |
3185 | def diff(ui, repo, *pats, **opts): |
|
3184 | def diff(ui, repo, *pats, **opts): | |
3186 | """diff of the current patch and subsequent modifications |
|
3185 | """diff of the current patch and subsequent modifications | |
3187 |
|
3186 | |||
3188 | Shows a diff which includes the current patch as well as any |
|
3187 | Shows a diff which includes the current patch as well as any | |
3189 | changes which have been made in the working directory since the |
|
3188 | changes which have been made in the working directory since the | |
3190 | last refresh (thus showing what the current patch would become |
|
3189 | last refresh (thus showing what the current patch would become | |
3191 | after a qrefresh). |
|
3190 | after a qrefresh). | |
3192 |
|
3191 | |||
3193 | Use :hg:`diff` if you only want to see the changes made since the |
|
3192 | Use :hg:`diff` if you only want to see the changes made since the | |
3194 | last qrefresh, or :hg:`export qtip` if you want to see changes |
|
3193 | last qrefresh, or :hg:`export qtip` if you want to see changes | |
3195 | made by the current patch without including changes made since the |
|
3194 | made by the current patch without including changes made since the | |
3196 | qrefresh. |
|
3195 | qrefresh. | |
3197 |
|
3196 | |||
3198 | Returns 0 on success. |
|
3197 | Returns 0 on success. | |
3199 | """ |
|
3198 | """ | |
3200 | ui.pager(b'qdiff') |
|
3199 | ui.pager(b'qdiff') | |
3201 | repo.mq.diff(repo, pats, pycompat.byteskwargs(opts)) |
|
3200 | repo.mq.diff(repo, pats, pycompat.byteskwargs(opts)) | |
3202 | return 0 |
|
3201 | return 0 | |
3203 |
|
3202 | |||
3204 |
|
3203 | |||
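
The docstring distinguishes three diff views; lined up as invocations
(illustrative, and 'hg export qtip' assumes at least one applied
patch):

import subprocess

for args, what in [
    (['hg', 'qdiff'], 'patch plus edits since the last qrefresh'),
    (['hg', 'diff'], 'only the edits since the last qrefresh'),
    (['hg', 'export', 'qtip'], 'only the patch, as last refreshed'),
]:
    print('==', what)
    subprocess.run(args, check=True)
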
3205 | @command( |
|
3204 | @command( | |
3206 | b'qfold', |
|
3205 | b'qfold', | |
3207 | [ |
|
3206 | [ | |
3208 | (b'e', b'edit', None, _(b'invoke editor on commit messages')), |
|
3207 | (b'e', b'edit', None, _(b'invoke editor on commit messages')), | |
3209 | (b'k', b'keep', None, _(b'keep folded patch files')), |
|
3208 | (b'k', b'keep', None, _(b'keep folded patch files')), | |
3210 | ] |
|
3209 | ] | |
3211 | + cmdutil.commitopts, |
|
3210 | + cmdutil.commitopts, | |
3212 | _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'), |
|
3211 | _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'), | |
3213 | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, |
|
3212 | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, | |
3214 | ) |
|
3213 | ) | |
3215 | def fold(ui, repo, *files, **opts): |
|
3214 | def fold(ui, repo, *files, **opts): | |
3216 | """fold the named patches into the current patch |
|
3215 | """fold the named patches into the current patch | |
3217 |
|
3216 | |||
3218 | Patches must not yet be applied. Each patch will be successively |
|
3217 | Patches must not yet be applied. Each patch will be successively | |
3219 | applied to the current patch in the order given. If all the |
|
3218 | applied to the current patch in the order given. If all the | |
3220 | patches apply successfully, the current patch will be refreshed |
|
3219 | patches apply successfully, the current patch will be refreshed | |
3221 | with the new cumulative patch, and the folded patches will be |
|
3220 | with the new cumulative patch, and the folded patches will be | |
3222 | deleted. With -k/--keep, the folded patch files will not be |
|
3221 | deleted. With -k/--keep, the folded patch files will not be | |
3223 | removed afterwards. |
|
3222 | removed afterwards. | |
3224 |
|
3223 | |||
3225 | The header for each folded patch will be concatenated with the |
|
3224 | The header for each folded patch will be concatenated with the | |
3226 | current patch header, separated by a line of ``* * *``. |
|

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    if not files:
        raise error.Abort(_(b'qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_(b'no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup(b'qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_(b'skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise error.Abort(
                _(b'qfold cannot fold already applied patch %s') % p
            )
        patches.append(p)

    for p in patches:
        if not message:
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_(b'error folding patch %s') % p)

    if not message:
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
    for msg in messages:
        if msg:
            if message:
                message.append(b'* * *')
            message.extend(msg)
    message = b'\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    with repo.wlock():
        q.refresh(
            repo,
            msg=message,
            git=diffopts.git,
            edit=opts.get(b'edit'),
            editform=b'mq.qfold',
        )
        q.delete(repo, patches, opts)
        q.savedirty()


@command(
    b"qgoto",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'overwrite any local changes')),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qgoto [OPTION]... PATCH'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def goto(ui, repo, patch, **opts):
    """push or pop patches until named patch is at top of stack

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get(b'no_backup')
    keepchanges = opts.get(b'keep_changes')
    if q.isapplied(patch):
        ret = q.pop(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    else:
        ret = q.push(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    q.savedirty()
    return ret
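

# Illustrative sketch, not from the upstream mq source (patch names are
# hypothetical): qgoto dispatches to q.pop() or q.push() depending on
# whether the target patch is already applied.
#
#   $ hg qseries          # a.patch, b.patch, c.patch (all applied)
#   $ hg qgoto a.patch    # pops c.patch and b.patch; a.patch becomes qtip
#   $ hg qgoto c.patch    # pushes b.patch and c.patch back onto the stack

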
@command(
    b"qguard",
    [
        (b'l', b'list', None, _(b'list all patches and guards')),
        (b'n', b'none', None, _(b'drop all guards')),
    ],
    _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def guard(ui, repo, *args, **opts):
    """set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    """

    def status(idx):
        guards = q.seriesguards[idx] or [b'unguarded']
        if q.series[idx] in applied:
            state = b'applied'
        elif q.pushable(idx)[0]:
            state = b'unapplied'
        else:
            state = b'guarded'
        label = b'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write(b'%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith(b'+'):
                ui.write(guard, label=b'qguard.positive')
            elif guard.startswith(b'-'):
                ui.write(guard, label=b'qguard.negative')
            else:
                ui.write(guard, label=b'qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(b' ')
        ui.write(b'\n')

    q = repo.mq
    applied = {p.name for p in q.applied}
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise error.Abort(
                _(b'cannot mix -l/--list with options or arguments')
            )
        for i in pycompat.xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in b'-+':
        if not q.applied:
            raise error.Abort(_(b'no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in b'-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_(b'no patch to work with'))
    if args or opts.get('none'):
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_(b'no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        status(q.series.index(q.lookup(patch)))


@command(
    b"qheader",
    [],
    _(b'hg qheader [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_(b'no patches applied\n'))
            return 1
        patch = q.lookup(b'qtip')
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write(b'\n'.join(ph.message) + b'\n')


def lastsavename(path):
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    namere = re.compile(b"%s.([0-9]+)" % base)
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)


def savename(path):
    (last, index) = lastsavename(path)
    if last is None:
        index = 0
    newpath = path + b".%d" % (index + 1)
    return newpath
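

# Illustrative sketch, not from the upstream mq source (file names are
# hypothetical): lastsavename() picks the highest-numbered backup named
# "<base>.<N>", so with "patches.1" and "patches.3" on disk,
# lastsavename(b'patches') returns ('<dir>/patches.3', 3) and
# savename(b'patches') returns b'patches.4'; with no backups at all it
# returns b'patches.1'.

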
@command(
    b"qpush",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'apply on top of local changes')),
        (
            b'e',
            b'exact',
            None,
            _(b'apply the target patch to its recorded parent'),
        ),
        (b'l', b'list', None, _(b'list patch name in commit text')),
        (b'a', b'all', None, _(b'apply all patches')),
        (b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')),
        (b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')),
        (
            b'',
            b'move',
            None,
            _(b'reorder patch series and apply only the patch'),
        ),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True,
)
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    if opts.get(b'merge'):
        if opts.get(b'name'):
            newpath = repo.vfs.join(opts.get(b'name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_(b"no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_(b"merging with queue at: %s\n") % mergeq.path)
    ret = q.push(
        repo,
        patch,
        force=opts.get(b'force'),
        list=opts.get(b'list'),
        mergeq=mergeq,
        all=opts.get(b'all'),
        move=opts.get(b'move'),
        exact=opts.get(b'exact'),
        nobackup=opts.get(b'no_backup'),
        keepchanges=opts.get(b'keep_changes'),
    )
    return ret


@command(
    b"qpop",
    [
        (b'a', b'all', None, _(b'pop all patches')),
        (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'forget any local changes to patched files')),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True,
)
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    if opts.get(b'name'):
        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
        ui.warn(_(b'using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(
        repo,
        patch,
        force=opts.get(b'force'),
        update=localupdate,
        all=opts.get(b'all'),
        nobackup=opts.get(b'no_backup'),
        keepchanges=opts.get(b'keep_changes'),
    )
    q.savedirty()
    return ret
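

# Illustrative session, not from the upstream mq source (patch names are
# hypothetical): qpush and qpop are symmetric walks over the series file.
#
#   $ hg qpush        # apply the first unapplied patch in the series
#   $ hg qpush -a     # apply every remaining patch, in series order
#   $ hg qpop -a      # unapply everything; the patches stay in the
#                     # series file for later reuse

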
@command(
    b"qrename|qmv",
    [],
    _(b'hg qrename PATCH1 [PATCH2]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_(b'no patches applied\n'))
            return
        patch = q.lookup(b'qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_(b'renaming %s to %s\n') % (patch, name))
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + b''.join([b' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        with r.wlock():
            if r.dirstate[patch] == b'a':
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                wctx.copy(patch, name)
                wctx.forget([patch])

    q.savedirty()


@command(
    b"qrestore",
    [
        (b'd', b'delete', None, _(b'delete save entry')),
        (b'u', b'update', None, _(b'update queue working directory')),
    ],
    _(b'hg qrestore [-d] [-u] REV'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    rev = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update'))
    q.savedirty()
    return 0


@command(
    b"qsave",
    [
        (b'c', b'copy', None, _(b'copy patch directory')),
        (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
        (b'e', b'empty', None, _(b'clear queue status file')),
        (b'f', b'force', None, _(b'force copy')),
    ]
    + cmdutil.commitopts,
    _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty()  # save to .hg/patches before copying
    if opts.get(b'copy'):
        path = q.path
        if opts.get(b'name'):
            newpath = os.path.join(q.basepath, opts.get(b'name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise error.Abort(
                        _(b'destination %s exists and is not a directory')
                        % newpath
                    )
                if not opts.get(b'force'):
                    raise error.Abort(
                        _(b'destination %s exists, use -f to force') % newpath
                    )
        else:
            newpath = savename(path)
        ui.warn(_(b"copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get(b'empty'):
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0


@command(
    b"qselect",
    [
        (b'n', b'none', None, _(b'disable all guards')),
        (b's', b'series', None, _(b'list all guards in series file')),
        (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
        (b'', b'reapply', None, _(b'pop, then reapply patches')),
    ],
    _(b'hg qselect [OPTION]... [GUARD]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def select(ui, repo, *args, **opts):
    """set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on a patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable (negative guard)
        qguard bar.patch +stable (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    guards = q.active()
    pushable = lambda i: q.pushable(q.applied[i].name)[0]
    if args or opts.get(b'none'):
        old_unapplied = q.unapplied(repo)
        old_guarded = [
            i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
        ]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_(b'guards deactivated\n'))
        if not opts.get(b'pop') and not opts.get(b'reapply'):
            unapplied = q.unapplied(repo)
            guarded = [
                i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
            ]
            if len(unapplied) != len(old_unapplied):
                ui.status(
                    _(
                        b'number of unguarded, unapplied patches has '
                        b'changed from %d to %d\n'
                    )
                    % (len(old_unapplied), len(unapplied))
                )
            if len(guarded) != len(old_guarded):
                ui.status(
                    _(
                        b'number of guarded, applied patches has changed '
                        b'from %d to %d\n'
                    )
                    % (len(old_guarded), len(guarded))
                )
    elif opts.get(b'series'):
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards[b'NONE'] = noguards
        guards = list(guards.items())
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_(b'guards in series file:\n'))
            for guard, count in guards:
                ui.note(b'%2d ' % count)
                ui.write(guard, b'\n')
        else:
            ui.note(_(b'no guards in series file\n'))
    else:
        if guards:
            ui.note(_(b'active guards:\n'))
            for g in guards:
                ui.write(g, b'\n')
        else:
            ui.write(_(b'no active guards\n'))
    reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
    popped = False
    if opts.get(b'pop') or opts.get(b'reapply'):
        for i in pycompat.xrange(len(q.applied)):
            if not pushable(i):
                ui.status(_(b'popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, q.applied[i - 1].name)
                break
    if popped:
        try:
            if reapply:
                ui.status(_(b'reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()
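

# Illustrative session, not from the upstream mq source (guard and patch
# names are hypothetical): a positive guard makes a patch pushable only
# while that guard is selected; a negative guard does the opposite.
#
#   $ hg qguard a.patch -- +featureX
#   $ hg qguard b.patch -- -featureX
#   $ hg qselect featureX    # a.patch pushable, b.patch guarded
#   $ hg qselect --none      # a.patch skipped, b.patch pushable again

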
@command(
    b"qfinish",
    [(b'a', b'applied', None, _(b'finish all applied changesets'))],
    _(b'hg qfinish [-a] [REV]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    if not opts.get('applied') and not revrange:
        raise error.Abort(_(b'no revisions specified'))
    elif opts.get('applied'):
        revrange = (b'qbase::qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_(b'no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    if repo[b'.'].rev() in revs and repo[None].files():
        ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases, but it leaves the responsibility of
    # locking the repo to the caller to avoid deadlock with wlock. This
    # command code is responsible for that locking.
    with repo.lock():
        q.finish(repo, revs)
        q.savedirty()
    return 0


@command(
    b"qqueue",
    [
        (b'l', b'list', False, _(b'list all available queues')),
        (b'', b'active', False, _(b'print name of active queue')),
        (b'c', b'create', False, _(b'create new queue')),
        (b'', b'rename', False, _(b'rename active queue')),
        (b'', b'delete', False, _(b'delete reference to queue')),
        (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
    ],
    _(b'[OPTION] [QUEUE]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def qqueue(ui, repo, name=None, **opts):
    """manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. In that case, the queue is
    only created, and switching to it fails.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    """
    q = repo.mq
    _defaultqueue = b'patches'
    _allqueues = b'patches.queues'
    _activequeue = b'patches.queue'

    def _getcurrent():
        cur = os.path.basename(q.path)
        if cur.startswith(b'patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        try:
            fh = repo.vfs(_allqueues, b'r')
            fh.close()
        except IOError:
            return True

        return False

    def _getqueues():
        current = _getcurrent()

        try:
            fh = repo.vfs(_allqueues, b'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    def _setactive(name):
        if q.applied:
            raise error.Abort(
                _(
                    b'new queue created, but cannot make active '
                    b'as patches are applied'
                )
            )
        _setactivenocheck(name)

    def _setactivenocheck(name):
        fh = repo.vfs(_activequeue, b'w')
        if name != b'patches':
            fh.write(name)
        fh.close()

    def _addqueue(name):
        fh = repo.vfs(_allqueues, b'a')
        fh.write(b'%s\n' % (name,))
        fh.close()

    def _queuedir(name):
        if name == b'patches':
            return repo.vfs.join(b'patches')
        else:
            return repo.vfs.join(b'patches-' + name)

    def _validname(name):
        for n in name:
            if n in b':\\/.':
                return False
        return True

    def _delete(name):
        if name not in existing:
            raise error.Abort(_(b'cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise error.Abort(_(b'cannot delete currently active queue'))

        fh = repo.vfs(b'patches.queues.new', b'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write(b'%s\n' % (queue,))
        fh.close()
        repo.vfs.rename(b'patches.queues.new', _allqueues)

    opts = pycompat.byteskwargs(opts)
    if not name or opts.get(b'list') or opts.get(b'active'):
        current = _getcurrent()
        if opts.get(b'active'):
            ui.write(b'%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write(b'%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(b' (active)\n'))
            else:
                ui.write(b'\n')
        return

    if not _validname(name):
        raise error.Abort(
            _(b'invalid queue name, may not contain the characters ":\\/."')
        )

    with repo.wlock():
        existing = _getqueues()

        if opts.get(b'create'):
            if name in existing:
                raise error.Abort(_(b'queue "%s" already exists') % name)
            if _noqueues():
                _addqueue(_defaultqueue)
            _addqueue(name)
            _setactive(name)
        elif opts.get(b'rename'):
            current = _getcurrent()
            if name == current:
                raise error.Abort(
                    _(b'can\'t rename "%s" to its current name') % name
                )
            if name in existing:
                raise error.Abort(_(b'queue "%s" already exists') % name)

            olddir = _queuedir(current)
            newdir = _queuedir(name)

            if os.path.exists(newdir):
                raise error.Abort(
                    _(b'non-queue directory "%s" already exists') % newdir
                )

            fh = repo.vfs(b'patches.queues.new', b'w')
            for queue in existing:
                if queue == current:
                    fh.write(b'%s\n' % (name,))
                    if os.path.exists(olddir):
                        util.rename(olddir, newdir)
                else:
                    fh.write(b'%s\n' % (queue,))
            fh.close()
            repo.vfs.rename(b'patches.queues.new', _allqueues)
            _setactivenocheck(name)
        elif opts.get(b'delete'):
            _delete(name)
        elif opts.get(b'purge'):
            if name in existing:
                _delete(name)
            qdir = _queuedir(name)
            if os.path.exists(qdir):
                shutil.rmtree(qdir)
        else:
            if name not in existing:
                raise error.Abort(_(b'use --create to create a new queue'))
            _setactive(name)
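

# Layout note derived from _queuedir() and _getcurrent() above: the
# default queue keeps its patches in .hg/patches, a queue named "foo"
# lives in .hg/patches-foo, the registered queue names are listed in
# .hg/patches.queues, and .hg/patches.queue records the active queue
# (left empty for the default queue).

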
def mqphasedefaults(repo, roots):
    """callback used to set mq changeset as secret when no phase data exists"""
    if repo.mq.applied:
        if repo.ui.configbool(b'mq', b'secret'):
            mqphase = phases.secret
        else:
            mqphase = phases.draft
        qbase = repo[repo.mq.applied[0].node]
        roots[mqphase].add(qbase.node())
    return roots
4094 | def reposetup(ui, repo): |
|
4093 | def reposetup(ui, repo): | |
4095 | class mqrepo(repo.__class__): |
|
4094 | class mqrepo(repo.__class__): | |
4096 | @localrepo.unfilteredpropertycache |
|
4095 | @localrepo.unfilteredpropertycache | |
4097 | def mq(self): |
|
4096 | def mq(self): | |
4098 | return queue(self.ui, self.baseui, self.path) |
|
4097 | return queue(self.ui, self.baseui, self.path) | |
4099 |
|
4098 | |||
4100 | def invalidateall(self): |
|
4099 | def invalidateall(self): | |
4101 | super(mqrepo, self).invalidateall() |
|
4100 | super(mqrepo, self).invalidateall() | |
4102 | if localrepo.hasunfilteredcache(self, 'mq'): |
|
4101 | if localrepo.hasunfilteredcache(self, 'mq'): | |
4103 | # recreate mq in case queue path was changed |
|
4102 | # recreate mq in case queue path was changed | |
4104 | delattr(self.unfiltered(), 'mq') |
|
4103 | delattr(self.unfiltered(), 'mq') | |
4105 |
|
4104 | |||
4106 | def abortifwdirpatched(self, errmsg, force=False): |
|
4105 | def abortifwdirpatched(self, errmsg, force=False): | |
4107 | if self.mq.applied and self.mq.checkapplied and not force: |
|
4106 | if self.mq.applied and self.mq.checkapplied and not force: | |
4108 | parents = self.dirstate.parents() |
|
4107 | parents = self.dirstate.parents() | |
4109 | patches = [s.node for s in self.mq.applied] |
|
4108 | patches = [s.node for s in self.mq.applied] | |
4110 | if any(p in patches for p in parents): |
|
4109 | if any(p in patches for p in parents): | |
4111 | raise error.Abort(errmsg) |
|
4110 | raise error.Abort(errmsg) | |
4112 |
|
4111 | |||
4113 | def commit( |
|
4112 | def commit( | |
4114 | self, |
|
4113 | self, | |
4115 | text=b"", |
|
4114 | text=b"", | |
4116 | user=None, |
|
4115 | user=None, | |
4117 | date=None, |
|
4116 | date=None, | |
4118 | match=None, |
|
4117 | match=None, | |
4119 | force=False, |
|
4118 | force=False, | |
4120 | editor=False, |
|
4119 | editor=False, | |
4121 | extra=None, |
|
4120 | extra=None, | |
4122 | ): |
|
4121 | ): | |
4123 | if extra is None: |
|
4122 | if extra is None: | |
4124 | extra = {} |
|
4123 | extra = {} | |
4125 | self.abortifwdirpatched( |
|
4124 | self.abortifwdirpatched( | |
4126 | _(b'cannot commit over an applied mq patch'), force |
|
4125 | _(b'cannot commit over an applied mq patch'), force | |
4127 | ) |
|
4126 | ) | |
4128 |
|
4127 | |||
4129 | return super(mqrepo, self).commit( |
|
4128 | return super(mqrepo, self).commit( | |
4130 | text, user, date, match, force, editor, extra |
|
4129 | text, user, date, match, force, editor, extra | |
4131 | ) |
|
4130 | ) | |
4132 |
|
4131 | |||
4133 | def checkpush(self, pushop): |
|
4132 | def checkpush(self, pushop): | |
4134 | if self.mq.applied and self.mq.checkapplied and not pushop.force: |
|
4133 | if self.mq.applied and self.mq.checkapplied and not pushop.force: | |
4135 | outapplied = [e.node for e in self.mq.applied] |
|
4134 | outapplied = [e.node for e in self.mq.applied] | |
4136 | if pushop.revs: |
|
4135 | if pushop.revs: | |
4137 | # Assume applied patches have no non-patch descendants and |
|
4136 | # Assume applied patches have no non-patch descendants and | |
4138 | # are not on remote already. Filtering any changeset not |
|
4137 | # are not on remote already. Filtering any changeset not | |
4139 | # pushed. |
|
4138 | # pushed. | |
4140 | heads = set(pushop.revs) |
|
4139 | heads = set(pushop.revs) | |
4141 | for node in reversed(outapplied): |
|
4140 | for node in reversed(outapplied): | |
4142 | if node in heads: |
|
4141 | if node in heads: | |
4143 | break |
|
4142 | break | |
4144 | else: |
|
4143 | else: | |
4145 | outapplied.pop() |
|
4144 | outapplied.pop() | |
4146 | # looking for pushed and shared changeset |
|
4145 | # looking for pushed and shared changeset | |
4147 | for node in outapplied: |
|
4146 | for node in outapplied: | |
4148 | if self[node].phase() < phases.secret: |
|
4147 | if self[node].phase() < phases.secret: | |
4149 | raise error.Abort(_(b'source has mq patches applied')) |
|
4148 | raise error.Abort(_(b'source has mq patches applied')) | |
4150 | # no non-secret patches pushed |
|
4149 | # no non-secret patches pushed | |
4151 | super(mqrepo, self).checkpush(pushop) |
|
4150 | super(mqrepo, self).checkpush(pushop) | |
4152 |
|
4151 | |||
4153 | def _findtags(self): |
|
4152 | def _findtags(self): | |
4154 | '''augment tags from base class with patch tags''' |
|
4153 | '''augment tags from base class with patch tags''' | |
4155 | result = super(mqrepo, self)._findtags() |
|
4154 | result = super(mqrepo, self)._findtags() | |
4156 |
|
4155 | |||
4157 | q = self.mq |
|
4156 | q = self.mq | |
4158 | if not q.applied: |
|
4157 | if not q.applied: | |
4159 | return result |
|
4158 | return result | |
4160 |
|
4159 | |||
4161 | mqtags = [(patch.node, patch.name) for patch in q.applied] |
|
4160 | mqtags = [(patch.node, patch.name) for patch in q.applied] | |
4162 |
|
4161 | |||
4163 | try: |
|
4162 | try: | |
4164 | # for now ignore filtering business |
|
4163 | # for now ignore filtering business | |
4165 | self.unfiltered().changelog.rev(mqtags[-1][0]) |
|
4164 | self.unfiltered().changelog.rev(mqtags[-1][0]) | |
4166 | except error.LookupError: |
|
4165 | except error.LookupError: | |
4167 | self.ui.warn( |
|
4166 | self.ui.warn( | |
4168 | _(b'mq status file refers to unknown node %s\n') |
|
4167 | _(b'mq status file refers to unknown node %s\n') | |
4169 | % short(mqtags[-1][0]) |
|
4168 | % short(mqtags[-1][0]) | |
4170 | ) |
|
4169 | ) | |
4171 | return result |
|
4170 | return result | |
4172 |
|
4171 | |||
4173 | # do not add fake tags for filtered revisions |
|
4172 | # do not add fake tags for filtered revisions | |
4174 | included = self.changelog.hasnode |
|
4173 | included = self.changelog.hasnode | |
4175 | mqtags = [mqt for mqt in mqtags if included(mqt[0])] |
|
4174 | mqtags = [mqt for mqt in mqtags if included(mqt[0])] | |
4176 | if not mqtags: |
|
4175 | if not mqtags: | |
4177 | return result |
|
4176 | return result | |
4178 |
|
4177 | |||
4179 | mqtags.append((mqtags[-1][0], b'qtip')) |
|
4178 | mqtags.append((mqtags[-1][0], b'qtip')) | |
4180 | mqtags.append((mqtags[0][0], b'qbase')) |
|
4179 | mqtags.append((mqtags[0][0], b'qbase')) | |
4181 | mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent')) |
|
4180 | mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent')) | |
4182 | tags = result[0] |
|
4181 | tags = result[0] | |
4183 | for patch in mqtags: |
|
4182 | for patch in mqtags: | |
4184 | if patch[1] in tags: |
|
4183 | if patch[1] in tags: | |
4185 | self.ui.warn( |
|
4184 | self.ui.warn( | |
4186 | _(b'tag %s overrides mq patch of the same name\n') |
|
4185 | _(b'tag %s overrides mq patch of the same name\n') | |
4187 | % patch[1] |
|
4186 | % patch[1] | |
4188 | ) |
|
4187 | ) | |
4189 | else: |
|
4188 | else: | |
4190 | tags[patch[1]] = patch[0] |
|
4189 | tags[patch[1]] = patch[0] | |
4191 |
|
4190 | |||
4192 | return result |
|
4191 | return result | |
4193 |
|
4192 | |||
4194 | if repo.local(): |
|
4193 | if repo.local(): | |
4195 | repo.__class__ = mqrepo |
|
4194 | repo.__class__ = mqrepo | |
4196 |
|
4195 | |||
4197 | repo._phasedefaults.append(mqphasedefaults) |
|
4196 | repo._phasedefaults.append(mqphasedefaults) | |
4198 |
|
4197 | |||
4199 |
|
4198 | |||
4200 | def mqimport(orig, ui, repo, *args, **kwargs): |
|
4199 | def mqimport(orig, ui, repo, *args, **kwargs): | |
4201 | if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get( |
|
4200 | if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get( | |
4202 | 'no_commit', False |
|
4201 | 'no_commit', False | |
4203 | ): |
|
4202 | ): | |
4204 | repo.abortifwdirpatched( |
|
4203 | repo.abortifwdirpatched( | |
4205 | _(b'cannot import over an applied patch'), kwargs.get('force') |
|
4204 | _(b'cannot import over an applied patch'), kwargs.get('force') | |
4206 | ) |
|
4205 | ) | |
4207 | return orig(ui, repo, *args, **kwargs) |
|
4206 | return orig(ui, repo, *args, **kwargs) | |
4208 |
|
4207 | |||
4209 |
|
4208 | |||
4210 | def mqinit(orig, ui, *args, **kwargs): |
|
4209 | def mqinit(orig, ui, *args, **kwargs): | |
4211 | mq = kwargs.pop('mq', None) |
|
4210 | mq = kwargs.pop('mq', None) | |
4212 |
|
4211 | |||
4213 | if not mq: |
|
4212 | if not mq: | |
4214 | return orig(ui, *args, **kwargs) |
|
4213 | return orig(ui, *args, **kwargs) | |
4215 |
|
4214 | |||
4216 | if args: |
|
4215 | if args: | |
4217 | repopath = args[0] |
|
4216 | repopath = args[0] | |
4218 | if not hg.islocal(repopath): |
|
4217 | if not hg.islocal(repopath): | |
4219 | raise error.Abort( |
|
4218 | raise error.Abort( | |
4220 | _(b'only a local queue repository may be initialized') |
|
4219 | _(b'only a local queue repository may be initialized') | |
4221 | ) |
|
4220 | ) | |
4222 | else: |
|
4221 | else: | |
4223 | repopath = cmdutil.findrepo(encoding.getcwd()) |
|
4222 | repopath = cmdutil.findrepo(encoding.getcwd()) | |
4224 | if not repopath: |
|
4223 | if not repopath: | |
4225 | raise error.Abort( |
|
4224 | raise error.Abort( | |
4226 | _(b'there is no Mercurial repository here (.hg not found)') |
|
4225 | _(b'there is no Mercurial repository here (.hg not found)') | |
4227 | ) |
|
4226 | ) | |
4228 | repo = hg.repository(ui, repopath) |
|
4227 | repo = hg.repository(ui, repopath) | |
4229 | return qinit(ui, repo, True) |
|
4228 | return qinit(ui, repo, True) | |
4230 |
|
4229 | |||
4231 |
|
4230 | |||
4232 | def mqcommand(orig, ui, repo, *args, **kwargs): |
|
4231 | def mqcommand(orig, ui, repo, *args, **kwargs): | |
4233 | """Add --mq option to operate on patch repository instead of main""" |
|
4232 | """Add --mq option to operate on patch repository instead of main""" | |
4234 |
|
4233 | |||
4235 | # some commands do not like getting unknown options |
|
4234 | # some commands do not like getting unknown options | |
4236 | mq = kwargs.pop('mq', None) |
|
4235 | mq = kwargs.pop('mq', None) | |
4237 |
|
4236 | |||
4238 | if not mq: |
|
4237 | if not mq: | |
4239 | return orig(ui, repo, *args, **kwargs) |
|
4238 | return orig(ui, repo, *args, **kwargs) | |
4240 |
|
4239 | |||
4241 | q = repo.mq |
|
4240 | q = repo.mq | |
4242 | r = q.qrepo() |
|
4241 | r = q.qrepo() | |
4243 | if not r: |
|
4242 | if not r: | |
4244 | raise error.Abort(_(b'no queue repository')) |
|
4243 | raise error.Abort(_(b'no queue repository')) | |
4245 | return orig(r.ui, r, *args, **kwargs) |
|
4244 | return orig(r.ui, r, *args, **kwargs) | |
4246 |
|
4245 | |||
4247 |
|
4246 | |||
4248 | def summaryhook(ui, repo): |
|
4247 | def summaryhook(ui, repo): | |
4249 | q = repo.mq |
|
4248 | q = repo.mq | |
4250 | m = [] |
|
4249 | m = [] | |
4251 | a, u = len(q.applied), len(q.unapplied(repo)) |
|
4250 | a, u = len(q.applied), len(q.unapplied(repo)) | |
4252 | if a: |
|
4251 | if a: | |
4253 | m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a) |
|
4252 | m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a) | |
4254 | if u: |
|
4253 | if u: | |
4255 | m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u) |
|
4254 | m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u) | |
4256 | if m: |
|
4255 | if m: | |
4257 | # i18n: column positioning for "hg summary" |
|
4256 | # i18n: column positioning for "hg summary" | |
4258 | ui.write(_(b"mq: %s\n") % b', '.join(m)) |
|
4257 | ui.write(_(b"mq: %s\n") % b', '.join(m)) | |
4259 | else: |
|
4258 | else: | |
4260 | # i18n: column positioning for "hg summary" |
|
4259 | # i18n: column positioning for "hg summary" | |
4261 | ui.note(_(b"mq: (empty queue)\n")) |
|
4260 | ui.note(_(b"mq: (empty queue)\n")) | |
4262 |
|
4261 | |||
4263 |
|
4262 | |||
4264 | revsetpredicate = registrar.revsetpredicate() |
|
4263 | revsetpredicate = registrar.revsetpredicate() | |
4265 |
|
4264 | |||
4266 |
|
4265 | |||
4267 | @revsetpredicate(b'mq()') |
|
4266 | @revsetpredicate(b'mq()') | |
4268 | def revsetmq(repo, subset, x): |
|
4267 | def revsetmq(repo, subset, x): | |
4269 | """Changesets managed by MQ.""" |
|
4268 | """Changesets managed by MQ.""" | |
4270 | revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments")) |
|
4269 | revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments")) | |
4271 | applied = {repo[r.node].rev() for r in repo.mq.applied} |
|
4270 | applied = {repo[r.node].rev() for r in repo.mq.applied} | |
4272 | return smartset.baseset([r for r in subset if r in applied]) |
|
4271 | return smartset.baseset([r for r in subset if r in applied]) | |
4273 |
|
4272 | |||
4274 |
|
4273 | |||
4275 | # tell hggettext to extract docstrings from these functions: |
|
4274 | # tell hggettext to extract docstrings from these functions: | |
4276 | i18nfunctions = [revsetmq] |
|
4275 | i18nfunctions = [revsetmq] | |
4277 |
|
4276 | |||
4278 |
|
4277 | |||
4279 | def extsetup(ui): |
|
4278 | def extsetup(ui): | |
4280 | # Ensure mq wrappers are called first, regardless of extension load order by |
|
4279 | # Ensure mq wrappers are called first, regardless of extension load order by | |
4281 | # NOT wrapping in uisetup() and instead deferring to init stage two here. |
|
4280 | # NOT wrapping in uisetup() and instead deferring to init stage two here. | |
4282 | mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))] |
|
4281 | mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))] | |
4283 |
|
4282 | |||
4284 | extensions.wrapcommand(commands.table, b'import', mqimport) |
|
4283 | extensions.wrapcommand(commands.table, b'import', mqimport) | |
4285 | cmdutil.summaryhooks.add(b'mq', summaryhook) |
|
4284 | cmdutil.summaryhooks.add(b'mq', summaryhook) | |
4286 |
|
4285 | |||
4287 | entry = extensions.wrapcommand(commands.table, b'init', mqinit) |
|
4286 | entry = extensions.wrapcommand(commands.table, b'init', mqinit) | |
4288 | entry[1].extend(mqopt) |
|
4287 | entry[1].extend(mqopt) | |
4289 |
|
4288 | |||
4290 | def dotable(cmdtable): |
|
4289 | def dotable(cmdtable): | |
4291 | for cmd, entry in pycompat.iteritems(cmdtable): |
|
4290 | for cmd, entry in pycompat.iteritems(cmdtable): | |
4292 | cmd = cmdutil.parsealiases(cmd)[0] |
|
4291 | cmd = cmdutil.parsealiases(cmd)[0] | |
4293 | func = entry[0] |
|
4292 | func = entry[0] | |
4294 | if func.norepo: |
|
4293 | if func.norepo: | |
4295 | continue |
|
4294 | continue | |
4296 | entry = extensions.wrapcommand(cmdtable, cmd, mqcommand) |
|
4295 | entry = extensions.wrapcommand(cmdtable, cmd, mqcommand) | |
4297 | entry[1].extend(mqopt) |
|
4296 | entry[1].extend(mqopt) | |
4298 |
|
4297 | |||
4299 | dotable(commands.table) |
|
4298 | dotable(commands.table) | |
4300 |
|
4299 | |||
4301 | thismodule = sys.modules["hgext.mq"] |
|
4300 | thismodule = sys.modules["hgext.mq"] | |
4302 | for extname, extmodule in extensions.extensions(): |
|
4301 | for extname, extmodule in extensions.extensions(): | |
4303 | if extmodule != thismodule: |
|
4302 | if extmodule != thismodule: | |
4304 | dotable(getattr(extmodule, 'cmdtable', {})) |
|
4303 | dotable(getattr(extmodule, 'cmdtable', {})) | |
4305 |
|
4304 | |||
4306 |
|
4305 | |||
4307 | colortable = { |
|
4306 | colortable = { | |
4308 | b'qguard.negative': b'red', |
|
4307 | b'qguard.negative': b'red', | |
4309 | b'qguard.positive': b'yellow', |
|
4308 | b'qguard.positive': b'yellow', | |
4310 | b'qguard.unguarded': b'green', |
|
4309 | b'qguard.unguarded': b'green', | |
4311 | b'qseries.applied': b'blue bold underline', |
|
4310 | b'qseries.applied': b'blue bold underline', | |
4312 | b'qseries.guarded': b'black bold', |
|
4311 | b'qseries.guarded': b'black bold', | |
4313 | b'qseries.missing': b'red bold', |
|
4312 | b'qseries.missing': b'red bold', | |
4314 | b'qseries.unapplied': b'black bold', |
|
4313 | b'qseries.unapplied': b'black bold', | |
4315 | } |
|
4314 | } |
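
The mq excerpt above is pure context: the one deletion in this file falls
before the excerpt (apparently the same nullid import removal visible in the
files below), so every line here is unchanged and merely renumbered by one.
Since this context happens to show mq's command-wrapping convention, here is
a minimal sketch of that pattern. It is a hypothetical standalone extension,
not part of this change; only extensions.wrapcommand() and the entry[1]
option list already used in the diff are assumed, and the --myflag option is
invented for illustration.

    # myext.py - hedged sketch of the wrapcommand() pattern mq uses above
    from mercurial import commands, extensions


    def wrappedimport(orig, ui, repo, *args, **kwargs):
        # Run extra checks first, then defer to the wrapped command,
        # mirroring mqimport()'s "orig comes first" calling convention.
        ui.note(b'pre-import checks would run here\n')
        return orig(ui, repo, *args, **kwargs)


    def extsetup(ui):
        # wrapcommand() returns the command-table entry; entry[1] is its
        # option list, which is how mq bolts --mq onto every command.
        entry = extensions.wrapcommand(commands.table, b'import', wrappedimport)
        entry[1].append((b'', b'myflag', None, b'hypothetical extra option'))

Wrapping in extsetup() rather than uisetup() is deliberate: as the comment in
mq's extsetup() notes, deferring to init stage two makes mq's wrappers run
first regardless of extension load order.
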
@@ -1,356 +1,355 @@
 # narrowbundle2.py - bundle2 extensions for narrow repository support
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import struct

 from mercurial.i18n import _
-from mercurial.node import nullid
 from mercurial import (
     bundle2,
     changegroup,
     error,
     exchange,
     localrepo,
     narrowspec,
     repair,
     requirements,
     scmutil,
     util,
     wireprototypes,
 )
 from mercurial.utils import stringutil

 _NARROWACL_SECTION = b'narrowacl'
 _CHANGESPECPART = b'narrow:changespec'
 _RESSPECS = b'narrow:responsespec'
 _SPECPART = b'narrow:spec'
 _SPECPART_INCLUDE = b'include'
 _SPECPART_EXCLUDE = b'exclude'
 _KILLNODESIGNAL = b'KILL'
 _DONESIGNAL = b'DONE'
 _ELIDEDCSHEADER = b'>20s20s20sl'  # cset id, p1, p2, len(text)
 _ELIDEDMFHEADER = b'>20s20s20s20sl'  # manifest id, p1, p2, link id, len(text)
 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)

 # Serve a changegroup for a client with a narrow clone.
 def getbundlechangegrouppart_narrow(
     bundler,
     repo,
     source,
     bundlecaps=None,
     b2caps=None,
     heads=None,
     common=None,
     **kwargs
 ):
     assert repo.ui.configbool(b'experimental', b'narrowservebrokenellipses')

     cgversions = b2caps.get(b'changegroup')
     cgversions = [
         v
         for v in cgversions
         if v in changegroup.supportedoutgoingversions(repo)
     ]
     if not cgversions:
         raise ValueError(_(b'no common changegroup version'))
     version = max(cgversions)

     include = sorted(filter(bool, kwargs.get('includepats', [])))
     exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
     generateellipsesbundle2(
         bundler,
         repo,
         include,
         exclude,
         version,
         common,
         heads,
         kwargs.get('depth', None),
     )


 def generateellipsesbundle2(
     bundler,
     repo,
     include,
     exclude,
     version,
     common,
     heads,
     depth,
 ):
     match = narrowspec.match(repo.root, include=include, exclude=exclude)
     if depth is not None:
         depth = int(depth)
         if depth < 1:
             raise error.Abort(_(b'depth must be positive, got %d') % depth)

     heads = set(heads or repo.heads())
-    common = set(common or [nullid])
+    common = set(common or [repo.nullid])

     visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
         repo, common, heads, set(), match, depth=depth
     )

     repo.ui.debug(b'Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
         packer = changegroup.getbundler(
             version,
             repo,
             matcher=match,
             ellipses=True,
             shallow=depth is not None,
             ellipsisroots=ellipsisroots,
             fullnodes=relevant_nodes,
         )
         cgdata = packer.generate(common, visitnodes, False, b'narrow_widen')

         part = bundler.newpart(b'changegroup', data=cgdata)
         part.addparam(b'version', version)
         if scmutil.istreemanifest(repo):
             part.addparam(b'treemanifest', b'1')


 def generate_ellipses_bundle2_for_widening(
     bundler,
     repo,
     oldmatch,
     newmatch,
     version,
     common,
     known,
 ):
-    common = set(common or [nullid])
+    common = set(common or [repo.nullid])
     # Steps:
     # 1. Send kill for "$known & ::common"
     #
     # 2. Send changegroup for ::common
     #
     # 3. Proceed.
     #
     # In the future, we can send kills for only the specific
     # nodes we know should go away or change shape, and then
     # send a data stream that tells the client something like this:
     #
     # a) apply this changegroup
     # b) apply nodes XXX, YYY, ZZZ that you already have
     # c) goto a
     #
     # until they've built up the full new state.
     knownrevs = {repo.changelog.rev(n) for n in known}
     # TODO: we could send only roots() of this set, and the
     # list of nodes in common, and the client could work out
     # what to strip, instead of us explicitly sending every
     # single node.
     deadrevs = knownrevs

     def genkills():
         for r in deadrevs:
             yield _KILLNODESIGNAL
             yield repo.changelog.node(r)
         yield _DONESIGNAL

     bundler.newpart(_CHANGESPECPART, data=genkills())
     newvisit, newfull, newellipsis = exchange._computeellipsis(
         repo, set(), common, knownrevs, newmatch
     )
     if newvisit:
         packer = changegroup.getbundler(
             version,
             repo,
             matcher=newmatch,
             ellipses=True,
             shallow=False,
             ellipsisroots=newellipsis,
             fullnodes=newfull,
         )
         cgdata = packer.generate(common, newvisit, False, b'narrow_widen')

         part = bundler.newpart(b'changegroup', data=cgdata)
         part.addparam(b'version', version)
         if scmutil.istreemanifest(repo):
             part.addparam(b'treemanifest', b'1')


 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
     # XXX: This bundle2 handling is buggy and should be removed after hg5.2 is
     # released. New servers will send a mandatory bundle2 part named
     # 'Narrowspec' and will send specs as data instead of params.
     # Refer to issue5952 and 6019
     includepats = set(inpart.params.get(_SPECPART_INCLUDE, b'').splitlines())
     excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, b'').splitlines())
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)

     if not requirements.NARROW_REQUIREMENT in op.repo.requirements:
         op.repo.requirements.add(requirements.NARROW_REQUIREMENT)
         scmutil.writereporequirements(op.repo)
     op.repo.setnarrowpats(includepats, excludepats)
     narrowspec.copytoworkingcopy(op.repo)


 @bundle2.parthandler(_RESSPECS)
 def _handlenarrowspecs(op, inpart):
     data = inpart.read()
     inc, exc = data.split(b'\0')
     includepats = set(inc.splitlines())
     excludepats = set(exc.splitlines())
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)

     if requirements.NARROW_REQUIREMENT not in op.repo.requirements:
         op.repo.requirements.add(requirements.NARROW_REQUIREMENT)
         scmutil.writereporequirements(op.repo)
     op.repo.setnarrowpats(includepats, excludepats)
     narrowspec.copytoworkingcopy(op.repo)


 @bundle2.parthandler(_CHANGESPECPART)
 def _handlechangespec(op, inpart):
     repo = op.repo
     cl = repo.changelog

     # changesets which need to be stripped entirely. either they're no longer
     # needed in the new narrow spec, or the server is sending a replacement
     # in the changegroup part.
     clkills = set()

     # A changespec part contains all the updates to ellipsis nodes
     # that will happen as a result of widening or narrowing a
     # repo. All the changes that this block encounters are ellipsis
     # nodes or flags to kill an existing ellipsis.
     chunksignal = changegroup.readexactly(inpart, 4)
     while chunksignal != _DONESIGNAL:
         if chunksignal == _KILLNODESIGNAL:
             # a node used to be an ellipsis but isn't anymore
             ck = changegroup.readexactly(inpart, 20)
             if cl.hasnode(ck):
                 clkills.add(ck)
         else:
             raise error.Abort(
                 _(b'unexpected changespec node chunk type: %s') % chunksignal
             )
         chunksignal = changegroup.readexactly(inpart, 4)

     if clkills:
         # preserve bookmarks that repair.strip() would otherwise strip
         op._bookmarksbackup = repo._bookmarks

         class dummybmstore(dict):
             def applychanges(self, repo, tr, changes):
                 pass

         localrepo.localrepository._bookmarks.set(repo, dummybmstore())
         chgrpfile = repair.strip(
             op.ui, repo, list(clkills), backup=True, topic=b'widen'
         )
         if chgrpfile:
             op._widen_uninterr = repo.ui.uninterruptible()
             op._widen_uninterr.__enter__()
             # presence of _widen_bundle attribute activates widen handler later
             op._widen_bundle = chgrpfile
     # Set the new narrowspec if we're widening. The setnewnarrowpats() method
     # will currently always be there when using the core+narrowhg server, but
     # other servers may include a changespec part even when not widening (e.g.
     # because we're deepening a shallow repo).
     if util.safehasattr(repo, 'setnewnarrowpats'):
         repo.setnewnarrowpats()


 def handlechangegroup_widen(op, inpart):
     """Changegroup exchange handler which restores temporarily-stripped nodes"""
     # We saved a bundle with stripped node data we must now restore.
     # This approach is based on mercurial/repair.py@6ee26a53c111.
     repo = op.repo
     ui = op.ui

     chgrpfile = op._widen_bundle
     del op._widen_bundle
     vfs = repo.vfs

     ui.note(_(b"adding branch\n"))
     f = vfs.open(chgrpfile, b"rb")
     try:
         gen = exchange.readbundle(ui, f, chgrpfile, vfs)
         # silence internal shuffling chatter
         override = {(b'ui', b'quiet'): True}
         if ui.verbose:
             override = {}
         with ui.configoverride(override):
             if isinstance(gen, bundle2.unbundle20):
                 with repo.transaction(b'strip') as tr:
                     bundle2.processbundle(repo, gen, lambda: tr)
             else:
                 gen.apply(
                     repo, b'strip', b'bundle:' + vfs.join(chgrpfile), True
                 )
     finally:
         f.close()

     # remove undo files
     for undovfs, undofile in repo.undofiles():
         try:
             undovfs.unlink(undofile)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 ui.warn(
                     _(b'error removing %s: %s\n')
                     % (undovfs.join(undofile), stringutil.forcebytestr(e))
                 )

     # Remove partial backup only if there were no exceptions
     op._widen_uninterr.__exit__(None, None, None)
     vfs.unlink(chgrpfile)


 def setup():
     """Enable narrow repo support in bundle2-related extension points."""
     getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS

     getbundleargs[b'narrow'] = b'boolean'
     getbundleargs[b'depth'] = b'plain'
     getbundleargs[b'oldincludepats'] = b'csv'
     getbundleargs[b'oldexcludepats'] = b'csv'
     getbundleargs[b'known'] = b'csv'

     # Extend changegroup serving to handle requests from narrow clients.
     origcgfn = exchange.getbundle2partsmapping[b'changegroup']

     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
             kwargs = exchange.applynarrowacl(repo, kwargs)

         if kwargs.get('narrow', False) and repo.ui.configbool(
             b'experimental', b'narrowservebrokenellipses'
         ):
             getbundlechangegrouppart_narrow(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)

     exchange.getbundle2partsmapping[b'changegroup'] = wrappedcgfn

     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping[b'changegroup']

     def wrappedcghandler(op, inpart):
         origcghandler(op, inpart)
         if util.safehasattr(op, '_widen_bundle'):
             handlechangegroup_widen(op, inpart)
         if util.safehasattr(op, '_bookmarksbackup'):
             localrepo.localrepository._bookmarks.set(
                 op.repo, op._bookmarksbackup
             )
             del op._bookmarksbackup

     wrappedcghandler.params = origcghandler.params
     bundle2.parthandlermapping[b'changegroup'] = wrappedcghandler
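
Both hunks in narrowbundle2.py make the same mechanical substitution: the
module-level nullid constant (whose import is deleted above) is replaced by
the repository-scoped repo.nullid attribute at each use site. A minimal
sketch of the before/after shape, assuming only that a repo object is in
scope, as it is at both call sites here (the helper name is invented):

    def fallback_common(repo, common=None):
        # Old form, with the module-level constant:
        #     from mercurial.node import nullid
        #     return set(common or [nullid])
        # New form: ask the repository for its own null id, so the code
        # no longer hard-codes one global notion of the null node.
        return set(common or [repo.nullid])

Reading repo.nullid instead of a global presumably lets the null id track
whatever node format a given repository uses, which would explain why the
substitution is applied uniformly across every call site in this change.
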
@@ -1,680 +1,679 b'' | |||||
1 | # narrowcommands.py - command modifications for narrowhg extension |
|
1 | # narrowcommands.py - command modifications for narrowhg extension | |
2 | # |
|
2 | # | |
3 | # Copyright 2017 Google, Inc. |
|
3 | # Copyright 2017 Google, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import itertools |
|
9 | import itertools | |
10 | import os |
|
10 | import os | |
11 |
|
11 | |||
12 | from mercurial.i18n import _ |
|
12 | from mercurial.i18n import _ | |
13 | from mercurial.node import ( |
|
13 | from mercurial.node import ( | |
14 | hex, |
|
14 | hex, | |
15 | nullid, |
|
|||
16 | short, |
|
15 | short, | |
17 | ) |
|
16 | ) | |
18 | from mercurial import ( |
|
17 | from mercurial import ( | |
19 | bundle2, |
|
18 | bundle2, | |
20 | cmdutil, |
|
19 | cmdutil, | |
21 | commands, |
|
20 | commands, | |
22 | discovery, |
|
21 | discovery, | |
23 | encoding, |
|
22 | encoding, | |
24 | error, |
|
23 | error, | |
25 | exchange, |
|
24 | exchange, | |
26 | extensions, |
|
25 | extensions, | |
27 | hg, |
|
26 | hg, | |
28 | narrowspec, |
|
27 | narrowspec, | |
29 | pathutil, |
|
28 | pathutil, | |
30 | pycompat, |
|
29 | pycompat, | |
31 | registrar, |
|
30 | registrar, | |
32 | repair, |
|
31 | repair, | |
33 | repoview, |
|
32 | repoview, | |
34 | requirements, |
|
33 | requirements, | |
35 | sparse, |
|
34 | sparse, | |
36 | util, |
|
35 | util, | |
37 | wireprototypes, |
|
36 | wireprototypes, | |
38 | ) |
|
37 | ) | |
39 | from mercurial.utils import ( |
|
38 | from mercurial.utils import ( | |
40 | urlutil, |
|
39 | urlutil, | |
41 | ) |
|
40 | ) | |
42 |
|
41 | |||
43 | table = {} |
|
42 | table = {} | |
44 | command = registrar.command(table) |
|
43 | command = registrar.command(table) | |
45 |
|
44 | |||
46 |
|
45 | |||
47 | def setup(): |
|
46 | def setup(): | |
48 | """Wraps user-facing mercurial commands with narrow-aware versions.""" |
|
47 | """Wraps user-facing mercurial commands with narrow-aware versions.""" | |
49 |
|
48 | |||
50 | entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd) |
|
49 | entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd) | |
51 | entry[1].append( |
|
50 | entry[1].append( | |
52 | (b'', b'narrow', None, _(b"create a narrow clone of select files")) |
|
51 | (b'', b'narrow', None, _(b"create a narrow clone of select files")) | |
53 | ) |
|
52 | ) | |
54 | entry[1].append( |
|
53 | entry[1].append( | |
55 | ( |
|
54 | ( | |
56 | b'', |
|
55 | b'', | |
57 | b'depth', |
|
56 | b'depth', | |
58 | b'', |
|
57 | b'', | |
59 | _(b"limit the history fetched by distance from heads"), |
|
58 | _(b"limit the history fetched by distance from heads"), | |
60 | ) |
|
59 | ) | |
61 | ) |
|
60 | ) | |
62 | entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file"))) |
|
61 | entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file"))) | |
63 | # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit |
|
62 | # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit | |
64 | if b'sparse' not in extensions.enabled(): |
|
63 | if b'sparse' not in extensions.enabled(): | |
65 | entry[1].append( |
|
64 | entry[1].append( | |
66 | (b'', b'include', [], _(b"specifically fetch this file/directory")) |
|
65 | (b'', b'include', [], _(b"specifically fetch this file/directory")) | |
67 | ) |
|
66 | ) | |
68 | entry[1].append( |
|
67 | entry[1].append( | |
69 | ( |
|
68 | ( | |
70 | b'', |
|
69 | b'', | |
71 | b'exclude', |
|
70 | b'exclude', | |
72 | [], |
|
71 | [], | |
73 | _(b"do not fetch this file/directory, even if included"), |
|
72 | _(b"do not fetch this file/directory, even if included"), | |
74 | ) |
|
73 | ) | |
75 | ) |
|
74 | ) | |
76 |
|
75 | |||
77 | entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd) |
|
76 | entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd) | |
78 | entry[1].append( |
|
77 | entry[1].append( | |
79 | ( |
|
78 | ( | |
80 | b'', |
|
79 | b'', | |
81 | b'depth', |
|
80 | b'depth', | |
82 | b'', |
|
81 | b'', | |
83 | _(b"limit the history fetched by distance from heads"), |
|
82 | _(b"limit the history fetched by distance from heads"), | |
84 | ) |
|
83 | ) | |
85 | ) |
|
84 | ) | |
86 |
|
85 | |||
87 | extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd) |
|
86 | extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd) | |
88 |
|
87 | |||
89 |
|
88 | |||
90 | def clonenarrowcmd(orig, ui, repo, *args, **opts): |
|
89 | def clonenarrowcmd(orig, ui, repo, *args, **opts): | |
91 | """Wraps clone command, so 'hg clone' first wraps localrepo.clone().""" |
|
90 | """Wraps clone command, so 'hg clone' first wraps localrepo.clone().""" | |
92 | opts = pycompat.byteskwargs(opts) |
|
91 | opts = pycompat.byteskwargs(opts) | |
93 | wrappedextraprepare = util.nullcontextmanager() |
|
92 | wrappedextraprepare = util.nullcontextmanager() | |
94 | narrowspecfile = opts[b'narrowspec'] |
|
93 | narrowspecfile = opts[b'narrowspec'] | |
95 |
|
94 | |||
96 | if narrowspecfile: |
|
95 | if narrowspecfile: | |
97 | filepath = os.path.join(encoding.getcwd(), narrowspecfile) |
|
96 | filepath = os.path.join(encoding.getcwd(), narrowspecfile) | |
98 | ui.status(_(b"reading narrowspec from '%s'\n") % filepath) |
|
97 | ui.status(_(b"reading narrowspec from '%s'\n") % filepath) | |
99 | try: |
|
98 | try: | |
100 | fdata = util.readfile(filepath) |
|
99 | fdata = util.readfile(filepath) | |
101 | except IOError as inst: |
|
100 | except IOError as inst: | |
102 | raise error.Abort( |
|
101 | raise error.Abort( | |
103 | _(b"cannot read narrowspecs from '%s': %s") |
|
102 | _(b"cannot read narrowspecs from '%s': %s") | |
104 | % (filepath, encoding.strtolocal(inst.strerror)) |
|
103 | % (filepath, encoding.strtolocal(inst.strerror)) | |
105 | ) |
|
104 | ) | |
106 |
|
105 | |||
107 | includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow') |
|
106 | includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow') | |
108 | if profiles: |
|
107 | if profiles: | |
109 | raise error.ConfigError( |
|
108 | raise error.ConfigError( | |
110 | _( |
|
109 | _( | |
111 | b"cannot specify other files using '%include' in" |
|
110 | b"cannot specify other files using '%include' in" | |
112 | b" narrowspec" |
|
111 | b" narrowspec" | |
113 | ) |
|
112 | ) | |
114 | ) |
|
113 | ) | |
115 |
|
114 | |||
116 | narrowspec.validatepatterns(includes) |
|
115 | narrowspec.validatepatterns(includes) | |
117 | narrowspec.validatepatterns(excludes) |
|
116 | narrowspec.validatepatterns(excludes) | |
118 |
|
117 | |||
119 | # narrowspec is passed so we should assume that user wants narrow clone |
|
118 | # narrowspec is passed so we should assume that user wants narrow clone | |
120 | opts[b'narrow'] = True |
|
119 | opts[b'narrow'] = True | |
121 | opts[b'include'].extend(includes) |
|
120 | opts[b'include'].extend(includes) | |
122 | opts[b'exclude'].extend(excludes) |
|
121 | opts[b'exclude'].extend(excludes) | |
123 |
|
122 | |||
124 | if opts[b'narrow']: |
|
123 | if opts[b'narrow']: | |
125 |
|
124 | |||
126 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
125 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): | |
127 | orig(pullop, kwargs) |
|
126 | orig(pullop, kwargs) | |
128 |
|
127 | |||
129 | if opts.get(b'depth'): |
|
128 | if opts.get(b'depth'): | |
130 | kwargs[b'depth'] = opts[b'depth'] |
|
129 | kwargs[b'depth'] = opts[b'depth'] | |
131 |
|
130 | |||
132 | wrappedextraprepare = extensions.wrappedfunction( |
|
131 | wrappedextraprepare = extensions.wrappedfunction( | |
133 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen |
|
132 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen | |
134 | ) |
|
133 | ) | |
135 |
|
134 | |||
136 | with wrappedextraprepare: |
|
135 | with wrappedextraprepare: | |
137 | return orig(ui, repo, *args, **pycompat.strkwargs(opts)) |
|
136 | return orig(ui, repo, *args, **pycompat.strkwargs(opts)) | |
138 |
|
137 | |||
139 |
|
138 | |||
140 | def pullnarrowcmd(orig, ui, repo, *args, **opts): |
|
139 | def pullnarrowcmd(orig, ui, repo, *args, **opts): | |
141 | """Wraps pull command to allow modifying narrow spec.""" |
|
140 | """Wraps pull command to allow modifying narrow spec.""" | |
142 | wrappedextraprepare = util.nullcontextmanager() |
|
141 | wrappedextraprepare = util.nullcontextmanager() | |
143 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
142 | if requirements.NARROW_REQUIREMENT in repo.requirements: | |
144 |
|
143 | |||
145 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
144 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): | |
146 | orig(pullop, kwargs) |
|
145 | orig(pullop, kwargs) | |
147 | if opts.get('depth'): |
|
146 | if opts.get('depth'): | |
148 | kwargs[b'depth'] = opts['depth'] |
|
147 | kwargs[b'depth'] = opts['depth'] | |
149 |
|
148 | |||
150 | wrappedextraprepare = extensions.wrappedfunction( |
|
149 | wrappedextraprepare = extensions.wrappedfunction( | |
151 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen |
|
150 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen | |
152 | ) |
|
151 | ) | |
153 |
|
152 | |||
154 | with wrappedextraprepare: |
|
153 | with wrappedextraprepare: | |
155 | return orig(ui, repo, *args, **opts) |
|
154 | return orig(ui, repo, *args, **opts) | |
156 |
|
155 | |||
157 |
|
156 | |||
158 | def archivenarrowcmd(orig, ui, repo, *args, **opts): |
|
157 | def archivenarrowcmd(orig, ui, repo, *args, **opts): | |
159 | """Wraps archive command to narrow the default includes.""" |
|
158 | """Wraps archive command to narrow the default includes.""" | |
160 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
159 | if requirements.NARROW_REQUIREMENT in repo.requirements: | |
161 | repo_includes, repo_excludes = repo.narrowpats |
|
160 | repo_includes, repo_excludes = repo.narrowpats | |
162 | includes = set(opts.get('include', [])) |
|
161 | includes = set(opts.get('include', [])) | |
@@ -163,47 +162,47 @@
         excludes = set(opts.get('exclude', []))
         includes, excludes, unused_invalid = narrowspec.restrictpatterns(
             includes, excludes, repo_includes, repo_excludes
         )
         if includes:
             opts['include'] = includes
         if excludes:
             opts['exclude'] = excludes
     return orig(ui, repo, *args, **opts)
 
 
 def pullbundle2extraprepare(orig, pullop, kwargs):
     repo = pullop.repo
     if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return orig(pullop, kwargs)
 
     if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
         raise error.Abort(_(b"server does not support narrow clones"))
     orig(pullop, kwargs)
     kwargs[b'narrow'] = True
     include, exclude = repo.narrowpats
     kwargs[b'oldincludepats'] = include
     kwargs[b'oldexcludepats'] = exclude
     if include:
         kwargs[b'includepats'] = include
     if exclude:
         kwargs[b'excludepats'] = exclude
     # calculate known nodes only in ellipses cases because in non-ellipses cases
     # we have all the nodes
     if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities():
         kwargs[b'known'] = [
             hex(ctx.node())
             for ctx in repo.set(b'::%ln', pullop.common)
-            if ctx.node() != nullid
+            if ctx.node() != repo.nullid
         ]
         if not kwargs[b'known']:
             # Mercurial serializes an empty list as '' and deserializes it as
             # [''], so delete it instead to avoid handling the empty string on
             # the server.
             del kwargs[b'known']
 
 
 extensions.wrapfunction(
     exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare
 )
 
 
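Note: `extensions.wrapfunction(container, name, wrapper)` is Mercurial's standard monkey-patching hook, used twice in this file. It replaces `container.name` with `wrapper` and passes the original callable as the wrapper's first argument, which is why `pullbundle2extraprepare(orig, pullop, kwargs)` above both accepts and calls `orig`. A minimal standalone sketch of the idiom (`mymod` and `greet` are hypothetical names, not part of this change)::

    from mercurial import extensions
    import mymod  # hypothetical module whose function we wrap

    def greetwrapper(orig, *args, **kwargs):
        # pre-processing could go here
        result = orig(*args, **kwargs)  # defer to the wrapped function
        # post-processing could go here
        return result

    extensions.wrapfunction(mymod, b'greet', greetwrapper)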
@@ -210,109 +209,109 @@
 def _narrow(
     ui,
     repo,
     remote,
     commoninc,
     oldincludes,
     oldexcludes,
     newincludes,
     newexcludes,
     force,
     backup,
 ):
     oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
     newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
 
     # This is essentially doing "hg outgoing" to find all local-only
     # commits. We will then check that the local-only commits don't
     # have any changes to files that will be untracked.
     unfi = repo.unfiltered()
     outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
     ui.status(_(b'looking for local changes to affected paths\n'))
     localnodes = []
     for n in itertools.chain(outgoing.missing, outgoing.excluded):
         if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
             localnodes.append(n)
     revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
     hiddenrevs = repoview.filterrevs(repo, b'visible')
     visibletostrip = list(
         repo.changelog.node(r) for r in (revstostrip - hiddenrevs)
     )
     if visibletostrip:
         ui.status(
             _(
                 b'The following changeset(s) or their ancestors have '
                 b'local changes not on the remote:\n'
             )
         )
         maxnodes = 10
         if ui.verbose or len(visibletostrip) <= maxnodes:
             for n in visibletostrip:
                 ui.status(b'%s\n' % short(n))
         else:
             for n in visibletostrip[:maxnodes]:
                 ui.status(b'%s\n' % short(n))
             ui.status(
                 _(b'...and %d more, use --verbose to list all\n')
                 % (len(visibletostrip) - maxnodes)
             )
         if not force:
             raise error.StateError(
                 _(b'local changes found'),
                 hint=_(b'use --force-delete-local-changes to ignore'),
             )
 
     with ui.uninterruptible():
         if revstostrip:
             tostrip = [unfi.changelog.node(r) for r in revstostrip]
             if repo[b'.'].node() in tostrip:
                 # stripping working copy, so move to a different commit first
                 urev = max(
                     repo.revs(
                         b'(::%n) - %ln + null',
                         repo[b'.'].node(),
                         visibletostrip,
                     )
                 )
                 hg.clean(repo, urev)
             overrides = {(b'devel', b'strip-obsmarkers'): False}
             with ui.configoverride(overrides, b'narrow'):
                 repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
 
         todelete = []
         for t, f, f2, size in repo.store.datafiles():
             if f.startswith(b'data/'):
                 file = f[5:-2]
                 if not newmatch(file):
                     todelete.append(f)
             elif f.startswith(b'meta/'):
                 dir = f[5:-13]
                 dirs = sorted(pathutil.dirs({dir})) + [dir]
                 include = True
                 for d in dirs:
                     visit = newmatch.visitdir(d)
                     if not visit:
                         include = False
                         break
                     if visit == b'all':
                         break
                 if not include:
                     todelete.append(f)
 
         repo.destroying()
 
         with repo.transaction(b'narrowing'):
             # Update narrowspec before removing revlogs, so repo won't be
             # corrupt in case of crash
             repo.setnarrowpats(newincludes, newexcludes)
 
             for f in todelete:
                 ui.status(_(b'deleting %s\n') % f)
                 util.unlinkpath(repo.svfs.join(f))
                 repo.store.markremoved(f)
 
             narrowspec.updateworkingcopy(repo, assumeclean=True)
             narrowspec.copytoworkingcopy(repo)
 
         repo.destroyed()
 
 
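Note: the `todelete` loop in `_narrow()` above converts store filenames back into tracked paths before matching them against the new narrowspec. `f[5:-2]` drops the `data/` prefix and the two-character revlog suffix (`.i` or `.d`), while `f[5:-13]` drops the `meta/` prefix and the 13-character `/00manifest.i` tail used by tree manifests. A worked example with hypothetical store paths::

    f = b'data/src/main.py.i'
    f[5:-2]   # b'src/main.py' -- tested with newmatch(file)

    f = b'meta/src/00manifest.i'
    f[5:-13]  # b'src'         -- tested via newmatch.visitdir(d)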
@@ -319,101 +318,101 @@
 def _widen(
     ui,
     repo,
     remote,
     commoninc,
     oldincludes,
     oldexcludes,
     newincludes,
     newexcludes,
 ):
     # for now we assume that if a server has ellipses enabled, we will be
     # exchanging ellipses nodes. In future we should add ellipses as a client
     # side requirement (maybe) to distinguish a client is shallow or not and
     # then send that information to server whether we want ellipses or not.
     # Theoretically a non-ellipses repo should be able to use narrow
     # functionality from an ellipses enabled server
     remotecap = remote.capabilities()
     ellipsesremote = any(
         cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP
     )
 
     # check whether we are talking to a server which supports old version of
     # ellipses capabilities
     isoldellipses = (
         ellipsesremote
         and wireprototypes.ELLIPSESCAP1 in remotecap
         and wireprototypes.ELLIPSESCAP not in remotecap
     )
 
     def pullbundle2extraprepare_widen(orig, pullop, kwargs):
         orig(pullop, kwargs)
         # The old{in,ex}cludepats have already been set by orig()
         kwargs[b'includepats'] = newincludes
         kwargs[b'excludepats'] = newexcludes
 
     wrappedextraprepare = extensions.wrappedfunction(
         exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
     )
 
     # define a function that narrowbundle2 can call after creating the
     # backup bundle, but before applying the bundle from the server
     def setnewnarrowpats():
         repo.setnarrowpats(newincludes, newexcludes)
 
     repo.setnewnarrowpats = setnewnarrowpats
     # silence the devel-warning of applying an empty changegroup
     overrides = {(b'devel', b'all-warnings'): False}
 
     common = commoninc[0]
     with ui.uninterruptible():
         if ellipsesremote:
             ds = repo.dirstate
             p1, p2 = ds.p1(), ds.p2()
             with ds.parentchange():
-                ds.setparents(nullid, nullid)
+                ds.setparents(repo.nullid, repo.nullid)
         if isoldellipses:
             with wrappedextraprepare:
                 exchange.pull(repo, remote, heads=common)
         else:
             known = []
             if ellipsesremote:
                 known = [
                     ctx.node()
                     for ctx in repo.set(b'::%ln', common)
-                    if ctx.node() != nullid
+                    if ctx.node() != repo.nullid
                 ]
             with remote.commandexecutor() as e:
                 bundle = e.callcommand(
                     b'narrow_widen',
                     {
                         b'oldincludes': oldincludes,
                         b'oldexcludes': oldexcludes,
                         b'newincludes': newincludes,
                         b'newexcludes': newexcludes,
                         b'cgversion': b'03',
                         b'commonheads': common,
                         b'known': known,
                         b'ellipses': ellipsesremote,
                     },
                 ).result()
 
             trmanager = exchange.transactionmanager(
                 repo, b'widen', remote.url()
             )
             with trmanager, repo.ui.configoverride(overrides, b'widen'):
                 op = bundle2.bundleoperation(
                     repo, trmanager.transaction, source=b'widen'
                 )
                 # TODO: we should catch error.Abort here
                 bundle2.processbundle(repo, bundle, op=op)
 
         if ellipsesremote:
             with ds.parentchange():
                 ds.setparents(p1, p2)
 
         with repo.transaction(b'widening'):
             repo.setnewnarrowpats()
             narrowspec.updateworkingcopy(repo)
             narrowspec.copytoworkingcopy(repo)
 
 
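Note: when the remote uses ellipses, `_widen()` above temporarily parks the dirstate on the null revision around the pull and restores the saved parents afterwards, presumably because the incoming ellipses bundle can reprocess the very changesets the working copy is parented on. The pattern in isolation (a sketch; `repo` is assumed, and the try/finally hardening is mine, not present in the code above)::

    ds = repo.dirstate
    p1, p2 = ds.p1(), ds.p2()                    # remember the real parents
    with ds.parentchange():
        ds.setparents(repo.nullid, repo.nullid)  # park on the null revision
    try:
        pass  # ... pull/apply the widened bundle ...
    finally:
        with ds.parentchange():
            ds.setparents(p1, p2)                # restore the real parents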
@@ -420,78 +419,78 @@
 # TODO(rdamazio): Make new matcher format and update description
 @command(
     b'tracked',
     [
         (b'', b'addinclude', [], _(b'new paths to include')),
         (b'', b'removeinclude', [], _(b'old paths to no longer include')),
         (
             b'',
             b'auto-remove-includes',
             False,
             _(b'automatically choose unused includes to remove'),
         ),
         (b'', b'addexclude', [], _(b'new paths to exclude')),
         (b'', b'import-rules', b'', _(b'import narrowspecs from a file')),
         (b'', b'removeexclude', [], _(b'old paths to no longer exclude')),
         (
             b'',
             b'clear',
             False,
             _(b'whether to replace the existing narrowspec'),
         ),
         (
             b'',
             b'force-delete-local-changes',
             False,
             _(b'forces deletion of local changes when narrowing'),
         ),
         (
             b'',
             b'backup',
             True,
             _(b'back up local changes when narrowing'),
         ),
         (
             b'',
             b'update-working-copy',
             False,
             _(b'update working copy when the store has changed'),
         ),
     ]
     + commands.remoteopts,
     _(b'[OPTIONS]... [REMOTE]'),
     inferrepo=True,
     helpcategory=command.CATEGORY_MAINTENANCE,
 )
 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
     """show or change the current narrowspec
 
     With no argument, shows the current narrowspec entries, one per line. Each
     line will be prefixed with 'I' or 'X' for included or excluded patterns,
     respectively.
 
     The narrowspec is comprised of expressions to match remote files and/or
     directories that should be pulled into your client.
     The narrowspec has *include* and *exclude* expressions, with excludes always
     trumping includes: that is, if a file matches an exclude expression, it will
     be excluded even if it also matches an include expression.
     Excluding files that were never included has no effect.
 
     Each included or excluded entry is in the format described by
     'hg help patterns'.
 
     The options allow you to add or remove included and excluded expressions.
 
     If --clear is specified, then all previous includes and excludes are DROPPED
     and replaced by the new ones specified to --addinclude and --addexclude.
     If --clear is specified without any further options, the narrowspec will be
     empty and will not match any files.
 
     If --auto-remove-includes is specified, then those includes that don't match
     any files modified by currently visible local commits (those not shared by
     the remote) will be added to the set of explicitly specified includes to
     remove.
 
     --import-rules accepts a path to a file containing rules, allowing you to
     add --addinclude, --addexclude rules in bulk. Like the other include and
     exclude switches, the changes are applied immediately.
     """
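The "excludes always trump includes" rule in the docstring above can be checked directly with the same `narrowspec.match()` helper this file already uses. A small sketch (the root path and patterns are hypothetical)::

    from mercurial import narrowspec

    m = narrowspec.match(b'/repo', [b'path:src'], [b'path:src/vendor'])
    m(b'src/main.py')        # True: matches an include, no exclude
    m(b'src/vendor/lib.py')  # False: the exclude wins over the include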
@@ -498,38 +497,38 @@
     opts = pycompat.byteskwargs(opts)
     if requirements.NARROW_REQUIREMENT not in repo.requirements:
         raise error.InputError(
             _(
                 b'the tracked command is only supported on '
                 b'repositories cloned with --narrow'
             )
         )
 
     # Before supporting, decide whether it "hg tracked --clear" should mean
     # tracking no paths or all paths.
     if opts[b'clear']:
         raise error.InputError(_(b'the --clear option is not yet supported'))
 
     # import rules from a file
     newrules = opts.get(b'import_rules')
     if newrules:
         try:
             filepath = os.path.join(encoding.getcwd(), newrules)
             fdata = util.readfile(filepath)
         except IOError as inst:
             raise error.StorageError(
                 _(b"cannot read narrowspecs from '%s': %s")
                 % (filepath, encoding.strtolocal(inst.strerror))
             )
         includepats, excludepats, profiles = sparse.parseconfig(
             ui, fdata, b'narrow'
         )
         if profiles:
             raise error.InputError(
                 _(
                     b"including other spec files using '%include' "
                     b"is not supported in narrowspec"
                 )
             )
         opts[b'addinclude'].extend(includepats)
         opts[b'addexclude'].extend(excludepats)
 
@@ -536,29 +535,29 @@
     addedincludes = narrowspec.parsepatterns(opts[b'addinclude'])
     removedincludes = narrowspec.parsepatterns(opts[b'removeinclude'])
     addedexcludes = narrowspec.parsepatterns(opts[b'addexclude'])
     removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude'])
     autoremoveincludes = opts[b'auto_remove_includes']
 
     update_working_copy = opts[b'update_working_copy']
     only_show = not (
         addedincludes
         or removedincludes
         or addedexcludes
         or removedexcludes
         or newrules
         or autoremoveincludes
         or update_working_copy
     )
 
     oldincludes, oldexcludes = repo.narrowpats
 
     # filter the user passed additions and deletions into actual additions and
     # deletions of excludes and includes
     addedincludes -= oldincludes
     removedincludes &= oldincludes
     addedexcludes -= oldexcludes
     removedexcludes &= oldexcludes
 
     widening = addedincludes or removedexcludes
     narrowing = removedincludes or addedexcludes
 
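The set arithmetic above is what decides whether a pull (widen) and/or a strip (narrow) is needed; adds that are already tracked and removals of never-tracked patterns fall out as no-ops. A toy walk-through with hypothetical patterns::

    oldincludes     = {b'path:src', b'path:docs'}
    addedincludes   = {b'path:src', b'path:tests'}   # --addinclude src tests
    removedincludes = {b'path:docs', b'path:misc'}   # --removeinclude docs misc
    addedexcludes = set()
    removedexcludes = set()

    addedincludes -= oldincludes     # {b'path:tests'}: src was already tracked
    removedincludes &= oldincludes   # {b'path:docs'}: misc was never tracked

    widening = addedincludes or removedexcludes      # truthy -> _widen() runs
    narrowing = removedincludes or addedexcludes     # truthy -> _narrow() runs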
@@ -565,21 +564,21 @@
     # Only print the current narrowspec.
     if only_show:
         ui.pager(b'tracked')
         fm = ui.formatter(b'narrow', opts)
         for i in sorted(oldincludes):
             fm.startitem()
             fm.write(b'status', b'%s ', b'I', label=b'narrow.included')
             fm.write(b'pat', b'%s\n', i, label=b'narrow.included')
         for i in sorted(oldexcludes):
             fm.startitem()
             fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded')
             fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded')
         fm.end()
         return 0
 
     if update_working_copy:
         with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'):
             narrowspec.updateworkingcopy(repo)
             narrowspec.copytoworkingcopy(repo)
         return 0
 
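For reference, the `only_show` branch prints the status letter and the pattern from each `fm.write()` pair, so with a hypothetical narrowspec a bare invocation would emit something like::

    $ hg tracked
    I path:src
    X path:src/vendor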
@@ -586,57 +585,57 @@
     if not (widening or narrowing or autoremoveincludes):
         ui.status(_(b"nothing to widen or narrow\n"))
         return 0
 
     with repo.wlock(), repo.lock():
         cmdutil.bailifchanged(repo)
 
         # Find the revisions we have in common with the remote. These will
         # be used for finding local-only changes for narrowing. They will
         # also define the set of revisions to update for widening.
         r = urlutil.get_unique_pull_path(b'tracked', repo, ui, remotepath)
         url, branches = r
         ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
         remote = hg.peer(repo, opts, url)
 
         try:
             # check narrow support before doing anything if widening needs to be
             # performed. In future we should also abort if client is ellipses and
             # server does not support ellipses
             if (
                 widening
                 and wireprototypes.NARROWCAP not in remote.capabilities()
             ):
                 raise error.Abort(_(b"server does not support narrow clones"))
 
             commoninc = discovery.findcommonincoming(repo, remote)
 
             if autoremoveincludes:
                 outgoing = discovery.findcommonoutgoing(
                     repo, remote, commoninc=commoninc
                 )
                 ui.status(_(b'looking for unused includes to remove\n'))
                 localfiles = set()
                 for n in itertools.chain(outgoing.missing, outgoing.excluded):
                     localfiles.update(repo[n].files())
                 suggestedremovals = []
                 for include in sorted(oldincludes):
                     match = narrowspec.match(repo.root, [include], oldexcludes)
                     if not any(match(f) for f in localfiles):
                         suggestedremovals.append(include)
                 if suggestedremovals:
                     for s in suggestedremovals:
                         ui.status(b'%s\n' % s)
                     if (
                         ui.promptchoice(
                             _(
                                 b'remove these unused includes (yn)?'
                                 b'$$ &Yes $$ &No'
                             )
                         )
                         == 0
                     ):
                         removedincludes.update(suggestedremovals)
                         narrowing = True
                 else:
                     ui.status(_(b'found no unused includes\n'))
 
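The --auto-remove-includes scan above flags an include as unused when no file touched by a local-only commit matches it (with the current excludes still applied). A toy illustration with hypothetical inputs, reusing the same matcher helper::

    from mercurial import narrowspec

    oldincludes = [b'path:src', b'path:docs']
    localfiles = {b'src/main.py'}    # files from outgoing.missing/excluded

    for include in sorted(oldincludes):
        m = narrowspec.match(b'/repo', [include], [])
        if not any(m(f) for f in localfiles):
            print(include)           # prints b'path:docs': removal candidate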
@@ -643,38 +642,38 @@
             if narrowing:
                 newincludes = oldincludes - removedincludes
                 newexcludes = oldexcludes | addedexcludes
                 _narrow(
                     ui,
                     repo,
                     remote,
                     commoninc,
                     oldincludes,
                     oldexcludes,
                     newincludes,
                     newexcludes,
                     opts[b'force_delete_local_changes'],
                     opts[b'backup'],
                 )
                 # _narrow() updated the narrowspec and _widen() below needs to
                 # use the updated values as its base (otherwise removed includes
                 # and addedexcludes will be lost in the resulting narrowspec)
                 oldincludes = newincludes
                 oldexcludes = newexcludes
 
             if widening:
                 newincludes = oldincludes | addedincludes
                 newexcludes = oldexcludes - removedexcludes
                 _widen(
                     ui,
                     repo,
                     remote,
                     commoninc,
                     oldincludes,
                     oldexcludes,
                     newincludes,
                     newexcludes,
                 )
         finally:
             remote.close()
 
     return 0
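The only functional change across all of the hunks above is the three-line switch from the module-level `nullid` constant to `repo.nullid`, mirroring the `nullid` import dropped at the top of the series. Resolving the null node id through the repository lets its definition vary per repo, apparently as groundwork for node ids that are not 20-byte SHA-1 hashes. The pattern side by side (a sketch; `ctx` and `repo` as in the code above)::

    # before: process-wide constant
    from mercurial.node import nullid
    if ctx.node() != nullid:
        pass

    # after: resolved per repository
    if ctx.node() != repo.nullid:
        pass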