@@ -1,1285 +1,1283 b'' | |||||
1 | # __init__.py - remotefilelog extension |
|
1 | # __init__.py - remotefilelog extension | |
2 | # |
|
2 | # | |
3 | # Copyright 2013 Facebook, Inc. |
|
3 | # Copyright 2013 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL) |
|
7 | """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL) | |
8 |
|
8 | |||
9 | This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY |
|
9 | This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY | |
10 | GUARANTEES. This means that repositories created with this extension may |
|
10 | GUARANTEES. This means that repositories created with this extension may | |
11 | only be usable with the exact version of this extension/Mercurial that was |
|
11 | only be usable with the exact version of this extension/Mercurial that was | |
12 | used. The extension attempts to enforce this in order to prevent repository |
|
12 | used. The extension attempts to enforce this in order to prevent repository | |
13 | corruption. |
|
13 | corruption. | |
14 |
|
14 | |||
15 | remotefilelog works by fetching file contents lazily and storing them |
|
15 | remotefilelog works by fetching file contents lazily and storing them | |
16 | in a cache on the client rather than in revlogs. This allows enormous |
|
16 | in a cache on the client rather than in revlogs. This allows enormous | |
17 | histories to be transferred only partially, making them easier to |
|
17 | histories to be transferred only partially, making them easier to | |
18 | operate on. |
|
18 | operate on. | |
19 |
|
19 | |||
20 | Configs: |
|
20 | Configs: | |
21 |
|
21 | |||
22 | ``packs.maxchainlen`` specifies the maximum delta chain length in pack files |
|
22 | ``packs.maxchainlen`` specifies the maximum delta chain length in pack files | |
23 |
|
23 | |||
24 | ``packs.maxpacksize`` specifies the maximum pack file size |
|
24 | ``packs.maxpacksize`` specifies the maximum pack file size | |
25 |
|
25 | |||
26 | ``packs.maxpackfilecount`` specifies the maximum number of packs in the |
|
26 | ``packs.maxpackfilecount`` specifies the maximum number of packs in the | |
27 | shared cache (trees only for now) |
|
27 | shared cache (trees only for now) | |
28 |
|
28 | |||
29 | ``remotefilelog.backgroundprefetch`` runs prefetch in background when True |
|
29 | ``remotefilelog.backgroundprefetch`` runs prefetch in background when True | |
30 |
|
30 | |||
31 | ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and |
|
31 | ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and | |
32 | update, and on other commands that use them. Different from pullprefetch. |
|
32 | update, and on other commands that use them. Different from pullprefetch. | |
33 |
|
33 | |||
34 | ``remotefilelog.gcrepack`` does garbage collection during repack when True |
|
34 | ``remotefilelog.gcrepack`` does garbage collection during repack when True | |
35 |
|
35 | |||
36 | ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before |
|
36 | ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before | |
37 | it is garbage collected |
|
37 | it is garbage collected | |
38 |
|
38 | |||
39 | ``remotefilelog.repackonhggc`` runs repack on hg gc when True |
|
39 | ``remotefilelog.repackonhggc`` runs repack on hg gc when True | |
40 |
|
40 | |||
41 | ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in |
|
41 | ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in | |
42 | days after which it is no longer prefetched. |
|
42 | days after which it is no longer prefetched. | |
43 |
|
43 | |||
44 | ``remotefilelog.prefetchdelay`` specifies delay between background |
|
44 | ``remotefilelog.prefetchdelay`` specifies delay between background | |
45 | prefetches in seconds after operations that change the working copy parent |
|
45 | prefetches in seconds after operations that change the working copy parent | |
46 |
|
46 | |||
47 | ``remotefilelog.data.gencountlimit`` constrains the minimum number of data |

47 | ``remotefilelog.data.gencountlimit`` constrains the minimum number of data | |
48 | pack files required to be considered part of a generation. In particular, |
|
48 | pack files required to be considered part of a generation. In particular, | |
49 | minimum number of pack files > gencountlimit. |

49 | minimum number of pack files > gencountlimit. | |
50 |
|
50 | |||
51 | ``remotefilelog.data.generations`` list for specifying the lower bound of |
|
51 | ``remotefilelog.data.generations`` list for specifying the lower bound of | |
52 | each generation of the data pack files. For example, list ['100MB','1MB'] |
|
52 | each generation of the data pack files. For example, list ['100MB','1MB'] | |
53 | or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [ |
|
53 | or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [ | |
54 | 1MB, 100MB) and [100MB, infinity). |
|
54 | 1MB, 100MB) and [100MB, infinity). | |
55 |
|
55 | |||
56 | ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to |
|
56 | ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to | |
57 | include in an incremental data repack. |
|
57 | include in an incremental data repack. | |
58 |
|
58 | |||
59 | ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for |
|
59 | ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for | |
60 | it to be considered for an incremental data repack. |
|
60 | it to be considered for an incremental data repack. | |
61 |
|
61 | |||
62 | ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files |
|
62 | ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files | |
63 | to include in an incremental data repack. |
|
63 | to include in an incremental data repack. | |
64 |
|
64 | |||
65 | ``remotefilelog.history.gencountlimit`` constrains the minimum number of |

65 | ``remotefilelog.history.gencountlimit`` constrains the minimum number of | |
66 | history pack files required to be considered part of a generation. In |
|
66 | history pack files required to be considered part of a generation. In | |
67 | particular, minimum number of pack files > gencountlimit. |

67 | particular, minimum number of pack files > gencountlimit. | |
68 |
|
68 | |||
69 | ``remotefilelog.history.generations`` list for specifying the lower bound of |
|
69 | ``remotefilelog.history.generations`` list for specifying the lower bound of | |
70 | each generation of the history pack files. For example, list [ |
|
70 | each generation of the history pack files. For example, list [ | |
71 | '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [ |
|
71 | '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [ | |
72 | 0, 1MB), [1MB, 100MB) and [100MB, infinity). |
|
72 | 0, 1MB), [1MB, 100MB) and [100MB, infinity). | |
73 |
|
73 | |||
74 | ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to |
|
74 | ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to | |
75 | include in an incremental history repack. |
|
75 | include in an incremental history repack. | |
76 |
|
76 | |||
77 | ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file |
|
77 | ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file | |
78 | for it to be considered for an incremental history repack. |
|
78 | for it to be considered for an incremental history repack. | |
79 |
|
79 | |||
80 | ``remotefilelog.history.repacksizelimit`` the maximum total size of pack |
|
80 | ``remotefilelog.history.repacksizelimit`` the maximum total size of pack | |
81 | files to include in an incremental history repack. |
|
81 | files to include in an incremental history repack. | |
82 |
|
82 | |||
83 | ``remotefilelog.backgroundrepack`` automatically consolidates packs in the |

83 | ``remotefilelog.backgroundrepack`` automatically consolidates packs in the | |
84 | background |
|
84 | background | |
85 |
|
85 | |||
86 | ``remotefilelog.cachepath`` path to cache |
|
86 | ``remotefilelog.cachepath`` path to cache | |
87 |
|
87 | |||
88 | ``remotefilelog.cachegroup`` if set, make cache directory sgid to this |
|
88 | ``remotefilelog.cachegroup`` if set, make cache directory sgid to this | |
89 | group |
|
89 | group | |
90 |
|
90 | |||
91 | ``remotefilelog.cacheprocess`` binary to invoke for fetching file data |
|
91 | ``remotefilelog.cacheprocess`` binary to invoke for fetching file data | |
92 |
|
92 | |||
93 | ``remotefilelog.debug`` turn on remotefilelog-specific debug output |
|
93 | ``remotefilelog.debug`` turn on remotefilelog-specific debug output | |
94 |
|
94 | |||
95 | ``remotefilelog.excludepattern`` pattern of files to exclude from pulls |
|
95 | ``remotefilelog.excludepattern`` pattern of files to exclude from pulls | |
96 |
|
96 | |||
97 | ``remotefilelog.includepattern`` pattern of files to include in pulls |
|
97 | ``remotefilelog.includepattern`` pattern of files to include in pulls | |
98 |
|
98 | |||
99 | ``remotefilelog.fetchwarning`` message to print when too many |

99 | ``remotefilelog.fetchwarning`` message to print when too many | |
100 | single-file fetches occur |
|
100 | single-file fetches occur | |
101 |
|
101 | |||
102 | ``remotefilelog.getfilesstep`` number of files to request in a single RPC |
|
102 | ``remotefilelog.getfilesstep`` number of files to request in a single RPC | |
103 |
|
103 | |||
104 | ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch |
|
104 | ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch | |
105 | files, otherwise use optimistic fetching |
|
105 | files, otherwise use optimistic fetching | |
106 |
|
106 | |||
107 | ``remotefilelog.pullprefetch`` revset for selecting files that should be |
|
107 | ``remotefilelog.pullprefetch`` revset for selecting files that should be | |
108 | eagerly downloaded rather than lazily |
|
108 | eagerly downloaded rather than lazily | |
109 |
|
109 | |||
110 | ``remotefilelog.reponame`` name of the repo. If set, used to partition |
|
110 | ``remotefilelog.reponame`` name of the repo. If set, used to partition | |
111 | data from other repos in a shared store. |
|
111 | data from other repos in a shared store. | |
112 |
|
112 | |||
113 | ``remotefilelog.server`` if true, enable server-side functionality |
|
113 | ``remotefilelog.server`` if true, enable server-side functionality | |
114 |
|
114 | |||
115 | ``remotefilelog.servercachepath`` path for caching blobs on the server |
|
115 | ``remotefilelog.servercachepath`` path for caching blobs on the server | |
116 |
|
116 | |||
117 | ``remotefilelog.serverexpiration`` number of days to keep cached server |
|
117 | ``remotefilelog.serverexpiration`` number of days to keep cached server | |
118 | blobs |
|
118 | blobs | |
119 |
|
119 | |||
120 | ``remotefilelog.validatecache`` if set, check cache entries for corruption |
|
120 | ``remotefilelog.validatecache`` if set, check cache entries for corruption | |
121 | before returning blobs |
|
121 | before returning blobs | |
122 |
|
122 | |||
123 | ``remotefilelog.validatecachelog`` if set, check cache entries for |
|
123 | ``remotefilelog.validatecachelog`` if set, check cache entries for | |
124 | corruption before returning metadata |
|
124 | corruption before returning metadata | |
125 |
|
125 | |||
126 | """ |
|
126 | """ | |
127 | from __future__ import absolute_import |
|
127 | from __future__ import absolute_import | |
128 |
|
128 | |||
129 | import os |
|
129 | import os | |
130 | import time |
|
130 | import time | |
131 | import traceback |
|
131 | import traceback | |
132 |
|
132 | |||
133 | from mercurial.node import hex |
|
133 | from mercurial.node import hex | |
134 | from mercurial.i18n import _ |
|
134 | from mercurial.i18n import _ | |
135 | from mercurial.pycompat import open |
|
135 | from mercurial.pycompat import open | |
136 | from mercurial import ( |
|
136 | from mercurial import ( | |
137 | changegroup, |
|
137 | changegroup, | |
138 | changelog, |
|
138 | changelog, | |
139 | cmdutil, |
|
139 | cmdutil, | |
140 | commands, |
|
140 | commands, | |
141 | configitems, |
|
141 | configitems, | |
142 | context, |
|
142 | context, | |
143 | copies, |
|
143 | copies, | |
144 | debugcommands as hgdebugcommands, |
|
144 | debugcommands as hgdebugcommands, | |
145 | dispatch, |
|
145 | dispatch, | |
146 | error, |
|
146 | error, | |
147 | exchange, |
|
147 | exchange, | |
148 | extensions, |
|
148 | extensions, | |
149 | hg, |
|
149 | hg, | |
150 | localrepo, |
|
150 | localrepo, | |
151 | match as matchmod, |
|
151 | match as matchmod, | |
152 | merge, |
|
152 | merge, | |
153 | node as nodemod, |
|
153 | node as nodemod, | |
154 | patch, |
|
154 | patch, | |
155 | pycompat, |
|
155 | pycompat, | |
156 | registrar, |
|
156 | registrar, | |
157 | repair, |
|
157 | repair, | |
158 | repoview, |
|
158 | repoview, | |
159 | revset, |
|
159 | revset, | |
160 | scmutil, |
|
160 | scmutil, | |
161 | smartset, |
|
161 | smartset, | |
162 | streamclone, |
|
162 | streamclone, | |
163 | util, |
|
163 | util, | |
164 | ) |
|
164 | ) | |
165 | from . import ( |
|
165 | from . import ( | |
166 | constants, |
|
166 | constants, | |
167 | debugcommands, |
|
167 | debugcommands, | |
168 | fileserverclient, |
|
168 | fileserverclient, | |
169 | remotefilectx, |
|
169 | remotefilectx, | |
170 | remotefilelog, |
|
170 | remotefilelog, | |
171 | remotefilelogserver, |
|
171 | remotefilelogserver, | |
172 | repack as repackmod, |
|
172 | repack as repackmod, | |
173 | shallowbundle, |
|
173 | shallowbundle, | |
174 | shallowrepo, |
|
174 | shallowrepo, | |
175 | shallowstore, |
|
175 | shallowstore, | |
176 | shallowutil, |
|
176 | shallowutil, | |
177 | shallowverifier, |
|
177 | shallowverifier, | |
178 | ) |
|
178 | ) | |
179 |
|
179 | |||
180 | # ensures debug commands are registered |
|
180 | # ensures debug commands are registered | |
181 | hgdebugcommands.command |
|
181 | hgdebugcommands.command | |
182 |
|
182 | |||
183 | cmdtable = {} |
|
183 | cmdtable = {} | |
184 | command = registrar.command(cmdtable) |
|
184 | command = registrar.command(cmdtable) | |
185 |
|
185 | |||
186 | configtable = {} |
|
186 | configtable = {} | |
187 | configitem = registrar.configitem(configtable) |
|
187 | configitem = registrar.configitem(configtable) | |
188 |
|
188 | |||
189 | configitem(b'remotefilelog', b'debug', default=False) |
|
189 | configitem(b'remotefilelog', b'debug', default=False) | |
190 |
|
190 | |||
191 | configitem(b'remotefilelog', b'reponame', default=b'') |
|
191 | configitem(b'remotefilelog', b'reponame', default=b'') | |
192 | configitem(b'remotefilelog', b'cachepath', default=None) |
|
192 | configitem(b'remotefilelog', b'cachepath', default=None) | |
193 | configitem(b'remotefilelog', b'cachegroup', default=None) |
|
193 | configitem(b'remotefilelog', b'cachegroup', default=None) | |
194 | configitem(b'remotefilelog', b'cacheprocess', default=None) |
|
194 | configitem(b'remotefilelog', b'cacheprocess', default=None) | |
195 | configitem(b'remotefilelog', b'cacheprocess.includepath', default=None) |
|
195 | configitem(b'remotefilelog', b'cacheprocess.includepath', default=None) | |
196 | configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB") |
|
196 | configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB") | |
197 |
|
197 | |||
198 | configitem( |
|
198 | configitem( | |
199 | b'remotefilelog', |
|
199 | b'remotefilelog', | |
200 | b'fallbackpath', |
|
200 | b'fallbackpath', | |
201 | default=configitems.dynamicdefault, |
|
201 | default=configitems.dynamicdefault, | |
202 | alias=[(b'remotefilelog', b'fallbackrepo')], |
|
202 | alias=[(b'remotefilelog', b'fallbackrepo')], | |
203 | ) |
|
203 | ) | |
204 |
|
204 | |||
205 | configitem(b'remotefilelog', b'validatecachelog', default=None) |
|
205 | configitem(b'remotefilelog', b'validatecachelog', default=None) | |
206 | configitem(b'remotefilelog', b'validatecache', default=b'on') |
|
206 | configitem(b'remotefilelog', b'validatecache', default=b'on') | |
207 | configitem(b'remotefilelog', b'server', default=None) |
|
207 | configitem(b'remotefilelog', b'server', default=None) | |
208 | configitem(b'remotefilelog', b'servercachepath', default=None) |
|
208 | configitem(b'remotefilelog', b'servercachepath', default=None) | |
209 | configitem(b"remotefilelog", b"serverexpiration", default=30) |
|
209 | configitem(b"remotefilelog", b"serverexpiration", default=30) | |
210 | configitem(b'remotefilelog', b'backgroundrepack', default=False) |
|
210 | configitem(b'remotefilelog', b'backgroundrepack', default=False) | |
211 | configitem(b'remotefilelog', b'bgprefetchrevs', default=None) |
|
211 | configitem(b'remotefilelog', b'bgprefetchrevs', default=None) | |
212 | configitem(b'remotefilelog', b'pullprefetch', default=None) |
|
212 | configitem(b'remotefilelog', b'pullprefetch', default=None) | |
213 | configitem(b'remotefilelog', b'backgroundprefetch', default=False) |
|
213 | configitem(b'remotefilelog', b'backgroundprefetch', default=False) | |
214 | configitem(b'remotefilelog', b'prefetchdelay', default=120) |
|
214 | configitem(b'remotefilelog', b'prefetchdelay', default=120) | |
215 | configitem(b'remotefilelog', b'prefetchdays', default=14) |
|
215 | configitem(b'remotefilelog', b'prefetchdays', default=14) | |
216 |
|
216 | |||
217 | configitem(b'remotefilelog', b'getfilesstep', default=10000) |
|
217 | configitem(b'remotefilelog', b'getfilesstep', default=10000) | |
218 | configitem(b'remotefilelog', b'getfilestype', default=b'optimistic') |
|
218 | configitem(b'remotefilelog', b'getfilestype', default=b'optimistic') | |
219 | configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault) |
|
219 | configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault) | |
220 | configitem(b'remotefilelog', b'fetchwarning', default=b'') |
|
220 | configitem(b'remotefilelog', b'fetchwarning', default=b'') | |
221 |
|
221 | |||
222 | configitem(b'remotefilelog', b'includepattern', default=None) |
|
222 | configitem(b'remotefilelog', b'includepattern', default=None) | |
223 | configitem(b'remotefilelog', b'excludepattern', default=None) |
|
223 | configitem(b'remotefilelog', b'excludepattern', default=None) | |
224 |
|
224 | |||
225 | configitem(b'remotefilelog', b'gcrepack', default=False) |
|
225 | configitem(b'remotefilelog', b'gcrepack', default=False) | |
226 | configitem(b'remotefilelog', b'repackonhggc', default=False) |
|
226 | configitem(b'remotefilelog', b'repackonhggc', default=False) | |
227 | configitem(b'repack', b'chainorphansbysize', default=True, experimental=True) |
|
227 | configitem(b'repack', b'chainorphansbysize', default=True, experimental=True) | |
228 |
|
228 | |||
229 | configitem(b'packs', b'maxpacksize', default=0) |
|
229 | configitem(b'packs', b'maxpacksize', default=0) | |
230 | configitem(b'packs', b'maxchainlen', default=1000) |
|
230 | configitem(b'packs', b'maxchainlen', default=1000) | |
231 |
|
231 | |||
232 | configitem(b'devel', b'remotefilelog.bg-wait', default=False) |
|
232 | configitem(b'devel', b'remotefilelog.bg-wait', default=False) | |
233 |
|
233 | |||
234 | # default TTL limit is 30 days |
|
234 | # default TTL limit is 30 days | |
235 | _defaultlimit = 60 * 60 * 24 * 30 |
|
235 | _defaultlimit = 60 * 60 * 24 * 30 | |
236 | configitem(b'remotefilelog', b'nodettl', default=_defaultlimit) |
|
236 | configitem(b'remotefilelog', b'nodettl', default=_defaultlimit) | |
237 |
|
237 | |||
238 | configitem(b'remotefilelog', b'data.gencountlimit', default=2), |
|
238 | configitem(b'remotefilelog', b'data.gencountlimit', default=2), | |
239 | configitem( |
|
239 | configitem( | |
240 | b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB'] |
|
240 | b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB'] | |
241 | ) |
|
241 | ) | |
242 | configitem(b'remotefilelog', b'data.maxrepackpacks', default=50) |
|
242 | configitem(b'remotefilelog', b'data.maxrepackpacks', default=50) | |
243 | configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB') |
|
243 | configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB') | |
244 | configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB') |
|
244 | configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB') | |
245 |
|
245 | |||
246 | configitem(b'remotefilelog', b'history.gencountlimit', default=2), |
|
246 | configitem(b'remotefilelog', b'history.gencountlimit', default=2), | |
247 | configitem(b'remotefilelog', b'history.generations', default=[b'100MB']) |
|
247 | configitem(b'remotefilelog', b'history.generations', default=[b'100MB']) | |
248 | configitem(b'remotefilelog', b'history.maxrepackpacks', default=50) |
|
248 | configitem(b'remotefilelog', b'history.maxrepackpacks', default=50) | |
249 | configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB') |
|
249 | configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB') | |
250 | configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB') |
|
250 | configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB') | |
251 |
|
251 | |||
252 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
252 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
253 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
253 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
254 | # be specifying the version(s) of Mercurial they are tested with, or |
|
254 | # be specifying the version(s) of Mercurial they are tested with, or | |
255 | # leave the attribute unspecified. |
|
255 | # leave the attribute unspecified. | |
256 | testedwith = b'ships-with-hg-core' |
|
256 | testedwith = b'ships-with-hg-core' | |
257 |
|
257 | |||
258 | repoclass = localrepo.localrepository |
|
258 | repoclass = localrepo.localrepository | |
259 | repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT) |
|
259 | repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT) | |
260 |
|
260 | |||
261 | isenabled = shallowutil.isenabled |
|
261 | isenabled = shallowutil.isenabled | |
262 |
|
262 | |||
263 |
|
263 | |||
264 | def uisetup(ui): |
|
264 | def uisetup(ui): | |
265 | """Wraps user facing Mercurial commands to swap them out with shallow |
|
265 | """Wraps user facing Mercurial commands to swap them out with shallow | |
266 | versions. |
|
266 | versions. | |
267 | """ |
|
267 | """ | |
268 | hg.wirepeersetupfuncs.append(fileserverclient.peersetup) |
|
268 | hg.wirepeersetupfuncs.append(fileserverclient.peersetup) | |
269 |
|
269 | |||
270 | entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow) |
|
270 | entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow) | |
271 | entry[1].append( |
|
271 | entry[1].append( | |
272 | ( |
|
272 | ( | |
273 | b'', |
|
273 | b'', | |
274 | b'shallow', |
|
274 | b'shallow', | |
275 | None, |
|
275 | None, | |
276 | _(b"create a shallow clone which uses remote file history"), |
|
276 | _(b"create a shallow clone which uses remote file history"), | |
277 | ) |
|
277 | ) | |
278 | ) |
|
278 | ) | |
279 |
|
279 | |||
280 | extensions.wrapcommand( |
|
280 | extensions.wrapcommand( | |
281 | commands.table, b'debugindex', debugcommands.debugindex |
|
281 | commands.table, b'debugindex', debugcommands.debugindex | |
282 | ) |
|
282 | ) | |
283 | extensions.wrapcommand( |
|
283 | extensions.wrapcommand( | |
284 | commands.table, b'debugindexdot', debugcommands.debugindexdot |
|
284 | commands.table, b'debugindexdot', debugcommands.debugindexdot | |
285 | ) |
|
285 | ) | |
286 | extensions.wrapcommand(commands.table, b'log', log) |
|
286 | extensions.wrapcommand(commands.table, b'log', log) | |
287 | extensions.wrapcommand(commands.table, b'pull', pull) |
|
287 | extensions.wrapcommand(commands.table, b'pull', pull) | |
288 |
|
288 | |||
289 | # Prevent 'hg manifest --all' |
|
289 | # Prevent 'hg manifest --all' | |
290 | def _manifest(orig, ui, repo, *args, **opts): |
|
290 | def _manifest(orig, ui, repo, *args, **opts): | |
291 | if isenabled(repo) and opts.get('all'): |
|
291 | if isenabled(repo) and opts.get('all'): | |
292 | raise error.Abort(_(b"--all is not supported in a shallow repo")) |
|
292 | raise error.Abort(_(b"--all is not supported in a shallow repo")) | |
293 |
|
293 | |||
294 | return orig(ui, repo, *args, **opts) |
|
294 | return orig(ui, repo, *args, **opts) | |
295 |
|
295 | |||
296 | extensions.wrapcommand(commands.table, b"manifest", _manifest) |
|
296 | extensions.wrapcommand(commands.table, b"manifest", _manifest) | |
297 |
|
297 | |||
298 | # Wrap remotefilelog with lfs code |
|
298 | # Wrap remotefilelog with lfs code | |
299 | def _lfsloaded(loaded=False): |
|
299 | def _lfsloaded(loaded=False): | |
300 | lfsmod = None |
|
300 | lfsmod = None | |
301 | try: |
|
301 | try: | |
302 | lfsmod = extensions.find(b'lfs') |
|
302 | lfsmod = extensions.find(b'lfs') | |
303 | except KeyError: |
|
303 | except KeyError: | |
304 | pass |
|
304 | pass | |
305 | if lfsmod: |
|
305 | if lfsmod: | |
306 | lfsmod.wrapfilelog(remotefilelog.remotefilelog) |
|
306 | lfsmod.wrapfilelog(remotefilelog.remotefilelog) | |
307 | fileserverclient._lfsmod = lfsmod |
|
307 | fileserverclient._lfsmod = lfsmod | |
308 |
|
308 | |||
309 | extensions.afterloaded(b'lfs', _lfsloaded) |
|
309 | extensions.afterloaded(b'lfs', _lfsloaded) | |
310 |
|
310 | |||
311 | # debugdata needs remotefilelog.len to work |
|
311 | # debugdata needs remotefilelog.len to work | |
312 | extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow) |
|
312 | extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow) | |
313 |
|
313 | |||
314 | changegroup.cgpacker = shallowbundle.shallowcg1packer |
|
314 | changegroup.cgpacker = shallowbundle.shallowcg1packer | |
315 |
|
315 | |||
316 | extensions.wrapfunction( |
|
316 | extensions.wrapfunction( | |
317 | changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles |
|
317 | changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles | |
318 | ) |
|
318 | ) | |
319 | extensions.wrapfunction( |
|
319 | extensions.wrapfunction( | |
320 | changegroup, b'makechangegroup', shallowbundle.makechangegroup |
|
320 | changegroup, b'makechangegroup', shallowbundle.makechangegroup | |
321 | ) |
|
321 | ) | |
322 | extensions.wrapfunction(localrepo, b'makestore', storewrapper) |
|
322 | extensions.wrapfunction(localrepo, b'makestore', storewrapper) | |
323 | extensions.wrapfunction(exchange, b'pull', exchangepull) |
|
323 | extensions.wrapfunction(exchange, b'pull', exchangepull) | |
324 | extensions.wrapfunction(merge, b'applyupdates', applyupdates) |
|
324 | extensions.wrapfunction(merge, b'applyupdates', applyupdates) | |
325 | extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles) |
|
325 | extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles) | |
326 | extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup) |
|
326 | extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup) | |
327 | extensions.wrapfunction(scmutil, b'_findrenames', findrenames) |
|
327 | extensions.wrapfunction(scmutil, b'_findrenames', findrenames) | |
328 | extensions.wrapfunction( |
|
328 | extensions.wrapfunction( | |
329 | copies, b'_computeforwardmissing', computeforwardmissing |
|
329 | copies, b'_computeforwardmissing', computeforwardmissing | |
330 | ) |
|
330 | ) | |
331 | extensions.wrapfunction(dispatch, b'runcommand', runcommand) |
|
331 | extensions.wrapfunction(dispatch, b'runcommand', runcommand) | |
332 | extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets) |
|
332 | extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets) | |
333 | extensions.wrapfunction(context.changectx, b'filectx', filectx) |
|
333 | extensions.wrapfunction(context.changectx, b'filectx', filectx) | |
334 | extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx) |
|
334 | extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx) | |
335 | extensions.wrapfunction(patch, b'trydiff', trydiff) |
|
335 | extensions.wrapfunction(patch, b'trydiff', trydiff) | |
336 | extensions.wrapfunction(hg, b'verify', _verify) |
|
336 | extensions.wrapfunction(hg, b'verify', _verify) | |
337 | scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook) |
|
337 | scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook) | |
338 |
|
338 | |||
339 | # disappointing hacks below |
|
339 | # disappointing hacks below | |
340 | extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn) |
|
340 | extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn) | |
341 | extensions.wrapfunction(revset, b'filelog', filelogrevset) |
|
341 | extensions.wrapfunction(revset, b'filelog', filelogrevset) | |
342 | revset.symbols[b'filelog'] = revset.filelog |
|
342 | revset.symbols[b'filelog'] = revset.filelog | |
343 | extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs) |
|
343 | extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs) | |
344 |
|
344 | |||
345 |
|
345 | |||
346 | def cloneshallow(orig, ui, repo, *args, **opts): |
|
346 | def cloneshallow(orig, ui, repo, *args, **opts): | |
347 | if opts.get('shallow'): |
|
347 | if opts.get('shallow'): | |
348 | repos = [] |
|
348 | repos = [] | |
349 |
|
349 | |||
350 | def pull_shallow(orig, self, *args, **kwargs): |
|
350 | def pull_shallow(orig, self, *args, **kwargs): | |
351 | if not isenabled(self): |
|
351 | if not isenabled(self): | |
352 | repos.append(self.unfiltered()) |
|
352 | repos.append(self.unfiltered()) | |
353 | # set up the client hooks so the post-clone update works |
|
353 | # set up the client hooks so the post-clone update works | |
354 | setupclient(self.ui, self.unfiltered()) |
|
354 | setupclient(self.ui, self.unfiltered()) | |
355 |
|
355 | |||
356 | # setupclient fixed the class on the repo itself |
|
356 | # setupclient fixed the class on the repo itself | |
357 | # but we also need to fix it on the repoview |
|
357 | # but we also need to fix it on the repoview | |
358 | if isinstance(self, repoview.repoview): |
|
358 | if isinstance(self, repoview.repoview): | |
359 | self.__class__.__bases__ = ( |
|
359 | self.__class__.__bases__ = ( | |
360 | self.__class__.__bases__[0], |
|
360 | self.__class__.__bases__[0], | |
361 | self.unfiltered().__class__, |
|
361 | self.unfiltered().__class__, | |
362 | ) |
|
362 | ) | |
363 | self.requirements.add(constants.SHALLOWREPO_REQUIREMENT) |
|
363 | self.requirements.add(constants.SHALLOWREPO_REQUIREMENT) | |
364 | scmutil.writereporequirements(self) |
|
364 | scmutil.writereporequirements(self) | |
365 |
|
365 | |||
366 | # Since setupclient hadn't been called, exchange.pull was not |
|
366 | # Since setupclient hadn't been called, exchange.pull was not | |
367 | # wrapped. So we need to manually invoke our version of it. |
|
367 | # wrapped. So we need to manually invoke our version of it. | |
368 | return exchangepull(orig, self, *args, **kwargs) |
|
368 | return exchangepull(orig, self, *args, **kwargs) | |
369 | else: |
|
369 | else: | |
370 | return orig(self, *args, **kwargs) |
|
370 | return orig(self, *args, **kwargs) | |
371 |
|
371 | |||
372 | extensions.wrapfunction(exchange, b'pull', pull_shallow) |
|
372 | extensions.wrapfunction(exchange, b'pull', pull_shallow) | |
373 |
|
373 | |||
374 | # Wrap the stream logic to add requirements and to pass include/exclude |
|
374 | # Wrap the stream logic to add requirements and to pass include/exclude | |
375 | # patterns around. |
|
375 | # patterns around. | |
376 | def setup_streamout(repo, remote): |
|
376 | def setup_streamout(repo, remote): | |
377 | # Replace remote.stream_out with a version that sends file |
|
377 | # Replace remote.stream_out with a version that sends file | |
378 | # patterns. |
|
378 | # patterns. | |
379 | def stream_out_shallow(orig): |
|
379 | def stream_out_shallow(orig): | |
380 | caps = remote.capabilities() |
|
380 | caps = remote.capabilities() | |
381 | if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps: |
|
381 | if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps: | |
382 | opts = {} |
|
382 | opts = {} | |
383 | if repo.includepattern: |
|
383 | if repo.includepattern: | |
384 | opts['includepattern'] = b'\0'.join(repo.includepattern) |
|
384 | opts['includepattern'] = b'\0'.join(repo.includepattern) | |
385 | if repo.excludepattern: |
|
385 | if repo.excludepattern: | |
386 | opts['excludepattern'] = b'\0'.join(repo.excludepattern) |
|
386 | opts['excludepattern'] = b'\0'.join(repo.excludepattern) | |
387 | return remote._callstream(b'stream_out_shallow', **opts) |
|
387 | return remote._callstream(b'stream_out_shallow', **opts) | |
388 | else: |
|
388 | else: | |
389 | return orig() |
|
389 | return orig() | |
390 |
|
390 | |||
391 | extensions.wrapfunction(remote, b'stream_out', stream_out_shallow) |
|
391 | extensions.wrapfunction(remote, b'stream_out', stream_out_shallow) | |
392 |
|
392 | |||
393 | def stream_wrap(orig, op): |
|
393 | def stream_wrap(orig, op): | |
394 | setup_streamout(op.repo, op.remote) |
|
394 | setup_streamout(op.repo, op.remote) | |
395 | return orig(op) |
|
395 | return orig(op) | |
396 |
|
396 | |||
397 | extensions.wrapfunction( |
|
397 | extensions.wrapfunction( | |
398 | streamclone, b'maybeperformlegacystreamclone', stream_wrap |
|
398 | streamclone, b'maybeperformlegacystreamclone', stream_wrap | |
399 | ) |
|
399 | ) | |
400 |
|
400 | |||
401 | def canperformstreamclone(orig, pullop, bundle2=False): |
|
401 | def canperformstreamclone(orig, pullop, bundle2=False): | |
402 | # remotefilelog is currently incompatible with the |
|
402 | # remotefilelog is currently incompatible with the | |
403 | # bundle2 flavor of streamclones, so force us to use |
|
403 | # bundle2 flavor of streamclones, so force us to use | |
404 | # v1 instead. |
|
404 | # v1 instead. | |
405 | if b'v2' in pullop.remotebundle2caps.get(b'stream', []): |
|
405 | if b'v2' in pullop.remotebundle2caps.get(b'stream', []): | |
406 | pullop.remotebundle2caps[b'stream'] = [ |
|
406 | pullop.remotebundle2caps[b'stream'] = [ | |
407 | c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2' |
|
407 | c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2' | |
408 | ] |
|
408 | ] | |
409 | if bundle2: |
|
409 | if bundle2: | |
410 | return False, None |
|
410 | return False, None | |
411 | supported, requirements = orig(pullop, bundle2=bundle2) |
|
411 | supported, requirements = orig(pullop, bundle2=bundle2) | |
412 | if requirements is not None: |
|
412 | if requirements is not None: | |
413 | requirements.add(constants.SHALLOWREPO_REQUIREMENT) |
|
413 | requirements.add(constants.SHALLOWREPO_REQUIREMENT) | |
414 | return supported, requirements |
|
414 | return supported, requirements | |
415 |
|
415 | |||
416 | extensions.wrapfunction( |
|
416 | extensions.wrapfunction( | |
417 | streamclone, b'canperformstreamclone', canperformstreamclone |
|
417 | streamclone, b'canperformstreamclone', canperformstreamclone | |
418 | ) |
|
418 | ) | |
419 |
|
419 | |||
420 | try: |
|
420 | try: | |
421 | orig(ui, repo, *args, **opts) |
|
421 | orig(ui, repo, *args, **opts) | |
422 | finally: |
|
422 | finally: | |
423 | if opts.get('shallow'): |
|
423 | if opts.get('shallow'): | |
424 | for r in repos: |
|
424 | for r in repos: | |
425 | if util.safehasattr(r, b'fileservice'): |
|
425 | if util.safehasattr(r, b'fileservice'): | |
426 | r.fileservice.close() |
|
426 | r.fileservice.close() | |
427 |
|
427 | |||
428 |
|
428 | |||
429 | def debugdatashallow(orig, *args, **kwds): |
|
429 | def debugdatashallow(orig, *args, **kwds): | |
430 | oldlen = remotefilelog.remotefilelog.__len__ |
|
430 | oldlen = remotefilelog.remotefilelog.__len__ | |
431 | try: |
|
431 | try: | |
432 | remotefilelog.remotefilelog.__len__ = lambda x: 1 |
|
432 | remotefilelog.remotefilelog.__len__ = lambda x: 1 | |
433 | return orig(*args, **kwds) |
|
433 | return orig(*args, **kwds) | |
434 | finally: |
|
434 | finally: | |
435 | remotefilelog.remotefilelog.__len__ = oldlen |
|
435 | remotefilelog.remotefilelog.__len__ = oldlen | |
436 |
|
436 | |||
437 |
|
437 | |||
438 | def reposetup(ui, repo): |
|
438 | def reposetup(ui, repo): | |
439 | if not repo.local(): |
|
439 | if not repo.local(): | |
440 | return |
|
440 | return | |
441 |
|
441 | |||
442 | # put here intentionally because it doesn't work in uisetup |

442 | # put here intentionally because it doesn't work in uisetup | |
443 | ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch) |
|
443 | ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch) | |
444 | ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch) |
|
444 | ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch) | |
445 |
|
445 | |||
446 | isserverenabled = ui.configbool(b'remotefilelog', b'server') |
|
446 | isserverenabled = ui.configbool(b'remotefilelog', b'server') | |
447 | isshallowclient = isenabled(repo) |
|
447 | isshallowclient = isenabled(repo) | |
448 |
|
448 | |||
449 | if isserverenabled and isshallowclient: |
|
449 | if isserverenabled and isshallowclient: | |
450 | raise RuntimeError(b"Cannot be both a server and shallow client.") |
|
450 | raise RuntimeError(b"Cannot be both a server and shallow client.") | |
451 |
|
451 | |||
452 | if isshallowclient: |
|
452 | if isshallowclient: | |
453 | setupclient(ui, repo) |
|
453 | setupclient(ui, repo) | |
454 |
|
454 | |||
455 | if isserverenabled: |
|
455 | if isserverenabled: | |
456 | remotefilelogserver.setupserver(ui, repo) |
|
456 | remotefilelogserver.setupserver(ui, repo) | |
457 |
|
457 | |||
458 |
|
458 | |||
459 | def setupclient(ui, repo): |
|
459 | def setupclient(ui, repo): | |
460 | if not isinstance(repo, localrepo.localrepository): |
|
460 | if not isinstance(repo, localrepo.localrepository): | |
461 | return |
|
461 | return | |
462 |
|
462 | |||
463 | # Even clients get the server setup since they need to have the |
|
463 | # Even clients get the server setup since they need to have the | |
464 | # wireprotocol endpoints registered. |
|
464 | # wireprotocol endpoints registered. | |
465 | remotefilelogserver.onetimesetup(ui) |
|
465 | remotefilelogserver.onetimesetup(ui) | |
466 | onetimeclientsetup(ui) |
|
466 | onetimeclientsetup(ui) | |
467 |
|
467 | |||
468 | shallowrepo.wraprepo(repo) |
|
468 | shallowrepo.wraprepo(repo) | |
469 | repo.store = shallowstore.wrapstore(repo.store) |
|
469 | repo.store = shallowstore.wrapstore(repo.store) | |
470 |
|
470 | |||
471 |
|
471 | |||
472 | def storewrapper(orig, requirements, path, vfstype): |
|
472 | def storewrapper(orig, requirements, path, vfstype): | |
473 | s = orig(requirements, path, vfstype) |
|
473 | s = orig(requirements, path, vfstype) | |
474 | if constants.SHALLOWREPO_REQUIREMENT in requirements: |
|
474 | if constants.SHALLOWREPO_REQUIREMENT in requirements: | |
475 | s = shallowstore.wrapstore(s) |
|
475 | s = shallowstore.wrapstore(s) | |
476 |
|
476 | |||
477 | return s |
|
477 | return s | |
478 |
|
478 | |||
479 |
|
479 | |||
480 | # prefetch files before update |
|
480 | # prefetch files before update | |
481 | def applyupdates( |
|
481 | def applyupdates( | |
482 | orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None |
|
482 | orig, repo, actions, wctx, mctx, overwrite, wantfiledata, **opts | |
483 | ): |
|
483 | ): | |
484 | if isenabled(repo): |
|
484 | if isenabled(repo): | |
485 | manifest = mctx.manifest() |
|
485 | manifest = mctx.manifest() | |
486 | files = [] |
|
486 | files = [] | |
487 | for f, args, msg in actions[b'g']: |
|
487 | for f, args, msg in actions[b'g']: | |
488 | files.append((f, hex(manifest[f]))) |
|
488 | files.append((f, hex(manifest[f]))) | |
489 | # batch fetch the needed files from the server |
|
489 | # batch fetch the needed files from the server | |
490 | repo.fileservice.prefetch(files) |
|
490 | repo.fileservice.prefetch(files) | |
491 | return orig( |
|
491 | return orig(repo, actions, wctx, mctx, overwrite, wantfiledata, **opts) | |
492 | repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels |
|
|||
493 | ) |
|
|||
494 |
|
492 | |||
495 |
|
493 | |||
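The hunk above swaps the wrapper's fixed ``labels`` keyword for ``**opts``, which is forwarded verbatim to the wrapped function. A minimal sketch of that forwarding pattern in plain Python, using made-up names rather than the real Mercurial APIs:

    def wrapped(repo, actions, labels=None, commitinfo=None):
        # stand-in for the function being wrapped
        return (labels, commitinfo)

    def wrapper(orig, repo, actions, **opts):
        # extension-specific work (e.g. prefetching files) would go here;
        # forwarding **opts unchanged means keyword arguments added to the
        # wrapped function later do not require touching this signature
        return orig(repo, actions, **opts)

    print(wrapper(wrapped, 'repo', [], labels=('local', 'other')))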
496 | # Prefetch merge checkunknownfiles |
|
494 | # Prefetch merge checkunknownfiles | |
497 | def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs): |
|
495 | def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs): | |
498 | if isenabled(repo): |
|
496 | if isenabled(repo): | |
499 | files = [] |
|
497 | files = [] | |
500 | sparsematch = repo.maybesparsematch(mctx.rev()) |
|
498 | sparsematch = repo.maybesparsematch(mctx.rev()) | |
501 | for f, (m, actionargs, msg) in pycompat.iteritems(actions): |
|
499 | for f, (m, actionargs, msg) in pycompat.iteritems(actions): | |
502 | if sparsematch and not sparsematch(f): |
|
500 | if sparsematch and not sparsematch(f): | |
503 | continue |
|
501 | continue | |
504 | if m in (b'c', b'dc', b'cm'): |
|
502 | if m in (b'c', b'dc', b'cm'): | |
505 | files.append((f, hex(mctx.filenode(f)))) |
|
503 | files.append((f, hex(mctx.filenode(f)))) | |
506 | elif m == b'dg': |
|
504 | elif m == b'dg': | |
507 | f2 = actionargs[0] |
|
505 | f2 = actionargs[0] | |
508 | files.append((f2, hex(mctx.filenode(f2)))) |
|
506 | files.append((f2, hex(mctx.filenode(f2)))) | |
509 | # batch fetch the needed files from the server |
|
507 | # batch fetch the needed files from the server | |
510 | repo.fileservice.prefetch(files) |
|
508 | repo.fileservice.prefetch(files) | |
511 | return orig(repo, wctx, mctx, force, actions, *args, **kwargs) |
|
509 | return orig(repo, wctx, mctx, force, actions, *args, **kwargs) | |
512 |
|
510 | |||
513 |
|
511 | |||
514 | # Prefetch files before status attempts to look at their size and contents |
|
512 | # Prefetch files before status attempts to look at their size and contents | |
515 | def checklookup(orig, self, files): |
|
513 | def checklookup(orig, self, files): | |
516 | repo = self._repo |
|
514 | repo = self._repo | |
517 | if isenabled(repo): |
|
515 | if isenabled(repo): | |
518 | prefetchfiles = [] |
|
516 | prefetchfiles = [] | |
519 | for parent in self._parents: |
|
517 | for parent in self._parents: | |
520 | for f in files: |
|
518 | for f in files: | |
521 | if f in parent: |
|
519 | if f in parent: | |
522 | prefetchfiles.append((f, hex(parent.filenode(f)))) |
|
520 | prefetchfiles.append((f, hex(parent.filenode(f)))) | |
523 | # batch fetch the needed files from the server |
|
521 | # batch fetch the needed files from the server | |
524 | repo.fileservice.prefetch(prefetchfiles) |
|
522 | repo.fileservice.prefetch(prefetchfiles) | |
525 | return orig(self, files) |
|
523 | return orig(self, files) | |
526 |
|
524 | |||
527 |
|
525 | |||
528 | # Prefetch the logic that compares added and removed files for renames |
|
526 | # Prefetch the logic that compares added and removed files for renames | |
529 | def findrenames(orig, repo, matcher, added, removed, *args, **kwargs): |
|
527 | def findrenames(orig, repo, matcher, added, removed, *args, **kwargs): | |
530 | if isenabled(repo): |
|
528 | if isenabled(repo): | |
531 | files = [] |
|
529 | files = [] | |
532 | pmf = repo[b'.'].manifest() |
|
530 | pmf = repo[b'.'].manifest() | |
533 | for f in removed: |
|
531 | for f in removed: | |
534 | if f in pmf: |
|
532 | if f in pmf: | |
535 | files.append((f, hex(pmf[f]))) |
|
533 | files.append((f, hex(pmf[f]))) | |
536 | # batch fetch the needed files from the server |
|
534 | # batch fetch the needed files from the server | |
537 | repo.fileservice.prefetch(files) |
|
535 | repo.fileservice.prefetch(files) | |
538 | return orig(repo, matcher, added, removed, *args, **kwargs) |
|
536 | return orig(repo, matcher, added, removed, *args, **kwargs) | |
539 |
|
537 | |||
540 |
|
538 | |||
541 | # prefetch files before pathcopies check |
|
539 | # prefetch files before pathcopies check | |
542 | def computeforwardmissing(orig, a, b, match=None): |
|
540 | def computeforwardmissing(orig, a, b, match=None): | |
543 | missing = orig(a, b, match=match) |
|
541 | missing = orig(a, b, match=match) | |
544 | repo = a._repo |
|
542 | repo = a._repo | |
545 | if isenabled(repo): |
|
543 | if isenabled(repo): | |
546 | mb = b.manifest() |
|
544 | mb = b.manifest() | |
547 |
|
545 | |||
548 | files = [] |
|
546 | files = [] | |
549 | sparsematch = repo.maybesparsematch(b.rev()) |
|
547 | sparsematch = repo.maybesparsematch(b.rev()) | |
550 | if sparsematch: |
|
548 | if sparsematch: | |
551 | sparsemissing = set() |
|
549 | sparsemissing = set() | |
552 | for f in missing: |
|
550 | for f in missing: | |
553 | if sparsematch(f): |
|
551 | if sparsematch(f): | |
554 | files.append((f, hex(mb[f]))) |
|
552 | files.append((f, hex(mb[f]))) | |
555 | sparsemissing.add(f) |
|
553 | sparsemissing.add(f) | |
556 | missing = sparsemissing |
|
554 | missing = sparsemissing | |
557 |
|
555 | |||
558 | # batch fetch the needed files from the server |
|
556 | # batch fetch the needed files from the server | |
559 | repo.fileservice.prefetch(files) |
|
557 | repo.fileservice.prefetch(files) | |
560 | return missing |
|
558 | return missing | |
561 |
|
559 | |||
562 |
|
560 | |||
563 | # close cache miss server connection after the command has finished |
|
561 | # close cache miss server connection after the command has finished | |
564 | def runcommand(orig, lui, repo, *args, **kwargs): |
|
562 | def runcommand(orig, lui, repo, *args, **kwargs): | |
565 | fileservice = None |
|
563 | fileservice = None | |
566 | # repo can be None when running in chg: |
|
564 | # repo can be None when running in chg: | |
567 | # - at startup, reposetup was called because serve is not norepo |
|
565 | # - at startup, reposetup was called because serve is not norepo | |
568 | # - a norepo command like "help" is called |
|
566 | # - a norepo command like "help" is called | |
569 | if repo and isenabled(repo): |
|
567 | if repo and isenabled(repo): | |
570 | fileservice = repo.fileservice |
|
568 | fileservice = repo.fileservice | |
571 | try: |
|
569 | try: | |
572 | return orig(lui, repo, *args, **kwargs) |
|
570 | return orig(lui, repo, *args, **kwargs) | |
573 | finally: |
|
571 | finally: | |
574 | if fileservice: |
|
572 | if fileservice: | |
575 | fileservice.close() |
|
573 | fileservice.close() | |
576 |
|
574 | |||
577 |
|
575 | |||
578 | # prevent strip from stripping remotefilelogs |
|
576 | # prevent strip from stripping remotefilelogs | |
579 | def _collectbrokencsets(orig, repo, files, striprev): |
|
577 | def _collectbrokencsets(orig, repo, files, striprev): | |
580 | if isenabled(repo): |
|
578 | if isenabled(repo): | |
581 | files = [f for f in files if not repo.shallowmatch(f)] |

579 | files = [f for f in files if not repo.shallowmatch(f)] | |
582 | return orig(repo, files, striprev) |
|
580 | return orig(repo, files, striprev) | |
583 |
|
581 | |||
584 |
|
582 | |||
585 | # changectx wrappers |
|
583 | # changectx wrappers | |
586 | def filectx(orig, self, path, fileid=None, filelog=None): |
|
584 | def filectx(orig, self, path, fileid=None, filelog=None): | |
587 | if fileid is None: |
|
585 | if fileid is None: | |
588 | fileid = self.filenode(path) |
|
586 | fileid = self.filenode(path) | |
589 | if isenabled(self._repo) and self._repo.shallowmatch(path): |
|
587 | if isenabled(self._repo) and self._repo.shallowmatch(path): | |
590 | return remotefilectx.remotefilectx( |
|
588 | return remotefilectx.remotefilectx( | |
591 | self._repo, path, fileid=fileid, changectx=self, filelog=filelog |
|
589 | self._repo, path, fileid=fileid, changectx=self, filelog=filelog | |
592 | ) |
|
590 | ) | |
593 | return orig(self, path, fileid=fileid, filelog=filelog) |
|
591 | return orig(self, path, fileid=fileid, filelog=filelog) | |
594 |
|
592 | |||
595 |
|
593 | |||
596 | def workingfilectx(orig, self, path, filelog=None): |
|
594 | def workingfilectx(orig, self, path, filelog=None): | |
597 | if isenabled(self._repo) and self._repo.shallowmatch(path): |
|
595 | if isenabled(self._repo) and self._repo.shallowmatch(path): | |
598 | return remotefilectx.remoteworkingfilectx( |
|
596 | return remotefilectx.remoteworkingfilectx( | |
599 | self._repo, path, workingctx=self, filelog=filelog |
|
597 | self._repo, path, workingctx=self, filelog=filelog | |
600 | ) |
|
598 | ) | |
601 | return orig(self, path, filelog=filelog) |
|
599 | return orig(self, path, filelog=filelog) | |
602 |
|
600 | |||
603 |
|
601 | |||
604 | # prefetch required revisions before a diff |
|
602 | # prefetch required revisions before a diff | |
605 | def trydiff( |
|
603 | def trydiff( | |
606 | orig, |
|
604 | orig, | |
607 | repo, |
|
605 | repo, | |
608 | revs, |
|
606 | revs, | |
609 | ctx1, |
|
607 | ctx1, | |
610 | ctx2, |
|
608 | ctx2, | |
611 | modified, |
|
609 | modified, | |
612 | added, |
|
610 | added, | |
613 | removed, |
|
611 | removed, | |
614 | copy, |
|
612 | copy, | |
615 | getfilectx, |
|
613 | getfilectx, | |
616 | *args, |
|
614 | *args, | |
617 | **kwargs |
|
615 | **kwargs | |
618 | ): |
|
616 | ): | |
619 | if isenabled(repo): |
|
617 | if isenabled(repo): | |
620 | prefetch = [] |
|
618 | prefetch = [] | |
621 | mf1 = ctx1.manifest() |
|
619 | mf1 = ctx1.manifest() | |
622 | for fname in modified + added + removed: |
|
620 | for fname in modified + added + removed: | |
623 | if fname in mf1: |
|
621 | if fname in mf1: | |
624 | fnode = getfilectx(fname, ctx1).filenode() |
|
622 | fnode = getfilectx(fname, ctx1).filenode() | |
625 | # fnode can be None if it's an edited working ctx file |

623 | # fnode can be None if it's an edited working ctx file | |
626 | if fnode: |
|
624 | if fnode: | |
627 | prefetch.append((fname, hex(fnode))) |
|
625 | prefetch.append((fname, hex(fnode))) | |
628 | if fname not in removed: |
|
626 | if fname not in removed: | |
629 | fnode = getfilectx(fname, ctx2).filenode() |
|
627 | fnode = getfilectx(fname, ctx2).filenode() | |
630 | if fnode: |
|
628 | if fnode: | |
631 | prefetch.append((fname, hex(fnode))) |
|
629 | prefetch.append((fname, hex(fnode))) | |
632 |
|
630 | |||
633 | repo.fileservice.prefetch(prefetch) |
|
631 | repo.fileservice.prefetch(prefetch) | |
634 |
|
632 | |||
635 | return orig( |
|
633 | return orig( | |
636 | repo, |
|
634 | repo, | |
637 | revs, |
|
635 | revs, | |
638 | ctx1, |
|
636 | ctx1, | |
639 | ctx2, |
|
637 | ctx2, | |
640 | modified, |
|
638 | modified, | |
641 | added, |
|
639 | added, | |
642 | removed, |
|
640 | removed, | |
643 | copy, |
|
641 | copy, | |
644 | getfilectx, |
|
642 | getfilectx, | |
645 | *args, |
|
643 | *args, | |
646 | **kwargs |
|
644 | **kwargs | |
647 | ) |
|
645 | ) | |
648 |
|
646 | |||
649 |
|
647 | |||
650 | # Prevent verify from processing files |
|
648 | # Prevent verify from processing files | |
651 | # a stub for mercurial.hg.verify() |
|
649 | # a stub for mercurial.hg.verify() | |
652 | def _verify(orig, repo, level=None): |
|
650 | def _verify(orig, repo, level=None): | |
653 | lock = repo.lock() |
|
651 | lock = repo.lock() | |
654 | try: |
|
652 | try: | |
655 | return shallowverifier.shallowverifier(repo).verify() |
|
653 | return shallowverifier.shallowverifier(repo).verify() | |
656 | finally: |
|
654 | finally: | |
657 | lock.release() |
|
655 | lock.release() | |
658 |
|
656 | |||
659 |
|
657 | |||
660 | clientonetime = False |
|
658 | clientonetime = False | |
661 |
|
659 | |||
662 |
|
660 | |||
663 | def onetimeclientsetup(ui): |
|
661 | def onetimeclientsetup(ui): | |
664 | global clientonetime |
|
662 | global clientonetime | |
665 | if clientonetime: |
|
663 | if clientonetime: | |
666 | return |
|
664 | return | |
667 | clientonetime = True |
|
665 | clientonetime = True | |
668 |
|
666 | |||
669 | # Don't commit filelogs until we know the commit hash, since the hash |
|
667 | # Don't commit filelogs until we know the commit hash, since the hash | |
670 | # is present in the filelog blob. |
|
668 | # is present in the filelog blob. | |
671 | # This violates Mercurial's filelog->manifest->changelog write order, |
|
669 | # This violates Mercurial's filelog->manifest->changelog write order, | |
672 | # but is generally fine for client repos. |
|
670 | # but is generally fine for client repos. | |
673 | pendingfilecommits = [] |
|
671 | pendingfilecommits = [] | |
674 |
|
672 | |||
675 | def addrawrevision( |
|
673 | def addrawrevision( | |
676 | orig, |
|
674 | orig, | |
677 | self, |
|
675 | self, | |
678 | rawtext, |
|
676 | rawtext, | |
679 | transaction, |
|
677 | transaction, | |
680 | link, |
|
678 | link, | |
681 | p1, |
|
679 | p1, | |
682 | p2, |
|
680 | p2, | |
683 | node, |
|
681 | node, | |
684 | flags, |
|
682 | flags, | |
685 | cachedelta=None, |
|
683 | cachedelta=None, | |
686 | _metatuple=None, |
|
684 | _metatuple=None, | |
687 | ): |
|
685 | ): | |
688 | if isinstance(link, int): |
|
686 | if isinstance(link, int): | |
689 | pendingfilecommits.append( |
|
687 | pendingfilecommits.append( | |
690 | ( |
|
688 | ( | |
691 | self, |
|
689 | self, | |
692 | rawtext, |
|
690 | rawtext, | |
693 | transaction, |
|
691 | transaction, | |
694 | link, |
|
692 | link, | |
695 | p1, |
|
693 | p1, | |
696 | p2, |
|
694 | p2, | |
697 | node, |
|
695 | node, | |
698 | flags, |
|
696 | flags, | |
699 | cachedelta, |
|
697 | cachedelta, | |
700 | _metatuple, |
|
698 | _metatuple, | |
701 | ) |
|
699 | ) | |
702 | ) |
|
700 | ) | |
703 | return node |
|
701 | return node | |
704 | else: |
|
702 | else: | |
705 | return orig( |
|
703 | return orig( | |
706 | self, |
|
704 | self, | |
707 | rawtext, |
|
705 | rawtext, | |
708 | transaction, |
|
706 | transaction, | |
709 | link, |
|
707 | link, | |
710 | p1, |
|
708 | p1, | |
711 | p2, |
|
709 | p2, | |
712 | node, |
|
710 | node, | |
713 | flags, |
|
711 | flags, | |
714 | cachedelta, |
|
712 | cachedelta, | |
715 | _metatuple=_metatuple, |
|
713 | _metatuple=_metatuple, | |
716 | ) |
|
714 | ) | |
717 |
|
715 | |||
718 | extensions.wrapfunction( |
|
716 | extensions.wrapfunction( | |
719 | remotefilelog.remotefilelog, b'addrawrevision', addrawrevision |
|
717 | remotefilelog.remotefilelog, b'addrawrevision', addrawrevision | |
720 | ) |
|
718 | ) | |
721 |
|
719 | |||
722 | def changelogadd(orig, self, *args, **kwargs): |
|
720 | def changelogadd(orig, self, *args, **kwargs): | |
723 | oldlen = len(self) |
|
721 | oldlen = len(self) | |
724 | node = orig(self, *args, **kwargs) |
|
722 | node = orig(self, *args, **kwargs) | |
725 | newlen = len(self) |
|
723 | newlen = len(self) | |
726 | if oldlen != newlen: |
|
724 | if oldlen != newlen: | |
727 | for oldargs in pendingfilecommits: |
|
725 | for oldargs in pendingfilecommits: | |
728 | log, rt, tr, link, p1, p2, n, fl, c, m = oldargs |
|
726 | log, rt, tr, link, p1, p2, n, fl, c, m = oldargs | |
729 | linknode = self.node(link) |
|
727 | linknode = self.node(link) | |
730 | if linknode == node: |
|
728 | if linknode == node: | |
731 | log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m) |
|
729 | log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m) | |
732 | else: |
|
730 | else: | |
733 | raise error.ProgrammingError( |
|
731 | raise error.ProgrammingError( | |
734 | b'pending multiple integer revisions are not supported' |
|
732 | b'pending multiple integer revisions are not supported' | |
735 | ) |
|
733 | ) | |
736 | else: |
|
734 | else: | |
737 | # "link" is actually wrong here (it is set to len(changelog)) |
|
735 | # "link" is actually wrong here (it is set to len(changelog)) | |
738 | # if changelog remains unchanged, skip writing file revisions |
|
736 | # if changelog remains unchanged, skip writing file revisions | |
739 | # but still do a sanity check about pending multiple revisions |
|
737 | # but still do a sanity check about pending multiple revisions | |
740 | if len({x[3] for x in pendingfilecommits}) > 1: |
|
738 | if len({x[3] for x in pendingfilecommits}) > 1: | |
741 | raise error.ProgrammingError( |
|
739 | raise error.ProgrammingError( | |
742 | b'pending multiple integer revisions are not supported' |
|
740 | b'pending multiple integer revisions are not supported' | |
743 | ) |
|
741 | ) | |
744 | del pendingfilecommits[:] |
|
742 | del pendingfilecommits[:] | |
745 | return node |
|
743 | return node | |
746 |
|
744 | |||
747 | extensions.wrapfunction(changelog.changelog, b'add', changelogadd) |
|
745 | extensions.wrapfunction(changelog.changelog, b'add', changelogadd) | |
748 |
|
746 | |||
749 |
|
747 | |||
750 | def getrenamedfn(orig, repo, endrev=None): |
|
748 | def getrenamedfn(orig, repo, endrev=None): | |
751 | if not isenabled(repo) or copies.usechangesetcentricalgo(repo): |
|
749 | if not isenabled(repo) or copies.usechangesetcentricalgo(repo): | |
752 | return orig(repo, endrev) |
|
750 | return orig(repo, endrev) | |
753 |
|
751 | |||
754 | rcache = {} |
|
752 | rcache = {} | |
755 |
|
753 | |||
756 | def getrenamed(fn, rev): |
|
754 | def getrenamed(fn, rev): | |
757 | '''looks up all renames for a file (up to endrev) the first |
|
755 | '''looks up all renames for a file (up to endrev) the first | |
758 | time the file is given. It indexes on the changerev and only |
|
756 | time the file is given. It indexes on the changerev and only | |
759 | parses the manifest if linkrev != changerev. |
|
757 | parses the manifest if linkrev != changerev. | |
760 | Returns rename info for fn at changerev rev.''' |
|
758 | Returns rename info for fn at changerev rev.''' | |
761 | if rev in rcache.setdefault(fn, {}): |
|
759 | if rev in rcache.setdefault(fn, {}): | |
762 | return rcache[fn][rev] |
|
760 | return rcache[fn][rev] | |
763 |
|
761 | |||
764 | try: |
|
762 | try: | |
765 | fctx = repo[rev].filectx(fn) |
|
763 | fctx = repo[rev].filectx(fn) | |
766 | for ancestor in fctx.ancestors(): |
|
764 | for ancestor in fctx.ancestors(): | |
767 | if ancestor.path() == fn: |
|
765 | if ancestor.path() == fn: | |
768 | renamed = ancestor.renamed() |
|
766 | renamed = ancestor.renamed() | |
769 | rcache[fn][ancestor.rev()] = renamed and renamed[0] |
|
767 | rcache[fn][ancestor.rev()] = renamed and renamed[0] | |
770 |
|
768 | |||
771 | renamed = fctx.renamed() |
|
769 | renamed = fctx.renamed() | |
772 | return renamed and renamed[0] |
|
770 | return renamed and renamed[0] | |
773 | except error.LookupError: |
|
771 | except error.LookupError: | |
774 | return None |
|
772 | return None | |
775 |
|
773 | |||
776 | return getrenamed |
|
774 | return getrenamed | |
777 |
|
775 | |||
778 |
|
776 | |||
779 | def walkfilerevs(orig, repo, match, follow, revs, fncache): |
|
777 | def walkfilerevs(orig, repo, match, follow, revs, fncache): | |
780 | if not isenabled(repo): |
|
778 | if not isenabled(repo): | |
781 | return orig(repo, match, follow, revs, fncache) |
|
779 | return orig(repo, match, follow, revs, fncache) | |
782 |
|
780 | |||
783 | # remotefilelogs can't be walked in rev order, so throw. |
|
781 | # remotefilelogs can't be walked in rev order, so throw. | |
784 | # The caller will see the exception and walk the commit tree instead. |
|
782 | # The caller will see the exception and walk the commit tree instead. | |
785 | if not follow: |
|
783 | if not follow: | |
786 | raise cmdutil.FileWalkError(b"Cannot walk via filelog") |
|
784 | raise cmdutil.FileWalkError(b"Cannot walk via filelog") | |
787 |
|
785 | |||
788 | wanted = set() |
|
786 | wanted = set() | |
789 | minrev, maxrev = min(revs), max(revs) |
|
787 | minrev, maxrev = min(revs), max(revs) | |
790 |
|
788 | |||
791 | pctx = repo[b'.'] |
|
789 | pctx = repo[b'.'] | |
792 | for filename in match.files(): |
|
790 | for filename in match.files(): | |
793 | if filename not in pctx: |
|
791 | if filename not in pctx: | |
794 | raise error.Abort( |
|
792 | raise error.Abort( | |
795 | _(b'cannot follow file not in parent revision: "%s"') % filename |
|
793 | _(b'cannot follow file not in parent revision: "%s"') % filename | |
796 | ) |
|
794 | ) | |
797 | fctx = pctx[filename] |
|
795 | fctx = pctx[filename] | |
798 |
|
796 | |||
799 | linkrev = fctx.linkrev() |
|
797 | linkrev = fctx.linkrev() | |
800 | if linkrev >= minrev and linkrev <= maxrev: |
|
798 | if linkrev >= minrev and linkrev <= maxrev: | |
801 | fncache.setdefault(linkrev, []).append(filename) |
|
799 | fncache.setdefault(linkrev, []).append(filename) | |
802 | wanted.add(linkrev) |
|
800 | wanted.add(linkrev) | |
803 |
|
801 | |||
804 | for ancestor in fctx.ancestors(): |
|
802 | for ancestor in fctx.ancestors(): | |
805 | linkrev = ancestor.linkrev() |
|
803 | linkrev = ancestor.linkrev() | |
806 | if linkrev >= minrev and linkrev <= maxrev: |
|
804 | if linkrev >= minrev and linkrev <= maxrev: | |
807 | fncache.setdefault(linkrev, []).append(ancestor.path()) |
|
805 | fncache.setdefault(linkrev, []).append(ancestor.path()) | |
808 | wanted.add(linkrev) |
|
806 | wanted.add(linkrev) | |
809 |
|
807 | |||
810 | return wanted |
|
808 | return wanted | |
811 |
|
809 | |||
812 |
|
810 | |||
813 | def filelogrevset(orig, repo, subset, x): |
|
811 | def filelogrevset(orig, repo, subset, x): | |
814 | """``filelog(pattern)`` |
|
812 | """``filelog(pattern)`` | |
815 | Changesets connected to the specified filelog. |
|
813 | Changesets connected to the specified filelog. | |
816 |
|
814 | |||
817 | For performance reasons, ``filelog()`` does not show every changeset |
|
815 | For performance reasons, ``filelog()`` does not show every changeset | |
818 | that affects the requested file(s). See :hg:`help log` for details. For |
|
816 | that affects the requested file(s). See :hg:`help log` for details. For | |
819 | a slower, more accurate result, use ``file()``. |
|
817 | a slower, more accurate result, use ``file()``. | |
820 | """ |
|
818 | """ | |
821 |
|
819 | |||
822 | if not isenabled(repo): |
|
820 | if not isenabled(repo): | |
823 | return orig(repo, subset, x) |
|
821 | return orig(repo, subset, x) | |
824 |
|
822 | |||
825 | # i18n: "filelog" is a keyword |
|
823 | # i18n: "filelog" is a keyword | |
826 | pat = revset.getstring(x, _(b"filelog requires a pattern")) |
|
824 | pat = revset.getstring(x, _(b"filelog requires a pattern")) | |
827 | m = matchmod.match( |
|
825 | m = matchmod.match( | |
828 | repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None] |
|
826 | repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None] | |
829 | ) |
|
827 | ) | |
830 | s = set() |
|
828 | s = set() | |
831 |
|
829 | |||
832 | if not matchmod.patkind(pat): |
|
830 | if not matchmod.patkind(pat): | |
833 | # slow |
|
831 | # slow | |
834 | for r in subset: |
|
832 | for r in subset: | |
835 | ctx = repo[r] |
|
833 | ctx = repo[r] | |
836 | cfiles = ctx.files() |
|
834 | cfiles = ctx.files() | |
837 | for f in m.files(): |
|
835 | for f in m.files(): | |
838 | if f in cfiles: |
|
836 | if f in cfiles: | |
839 | s.add(ctx.rev()) |
|
837 | s.add(ctx.rev()) | |
840 | break |
|
838 | break | |
841 | else: |
|
839 | else: | |
842 | # partial |
|
840 | # partial | |
843 | files = (f for f in repo[None] if m(f)) |
|
841 | files = (f for f in repo[None] if m(f)) | |
844 | for f in files: |
|
842 | for f in files: | |
845 | fctx = repo[None].filectx(f) |
|
843 | fctx = repo[None].filectx(f) | |
846 | s.add(fctx.linkrev()) |
|
844 | s.add(fctx.linkrev()) | |
847 | for actx in fctx.ancestors(): |
|
845 | for actx in fctx.ancestors(): | |
848 | s.add(actx.linkrev()) |
|
846 | s.add(actx.linkrev()) | |
849 |
|
847 | |||
850 | return smartset.baseset([r for r in subset if r in s]) |
|
848 | return smartset.baseset([r for r in subset if r in s]) | |
851 |
|
849 | |||
852 |
|
850 | |||
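The ``filelog()`` predicate documented above is normally driven through a
revset on the command line; an illustrative query (the path is only a
placeholder) would be:

    hg log -r 'filelog("path:mercurial/commands.py")'

Plain paths take the slow branch that scans ``ctx.files()`` for each candidate
revision, while patterns take the partial branch that walks the linkrevs of
the matching file contexts, as implemented above.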
853 | @command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True) |
|
851 | @command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True) | |
854 | def gc(ui, *args, **opts): |
|
852 | def gc(ui, *args, **opts): | |
855 | '''garbage collect the client and server filelog caches |
|
853 | '''garbage collect the client and server filelog caches | |
856 | ''' |
|
854 | ''' | |
857 | cachepaths = set() |
|
855 | cachepaths = set() | |
858 |
|
856 | |||
859 | # get the system client cache |
|
857 | # get the system client cache | |
860 | systemcache = shallowutil.getcachepath(ui, allowempty=True) |
|
858 | systemcache = shallowutil.getcachepath(ui, allowempty=True) | |
861 | if systemcache: |
|
859 | if systemcache: | |
862 | cachepaths.add(systemcache) |
|
860 | cachepaths.add(systemcache) | |
863 |
|
861 | |||
864 | # get repo client and server cache |
|
862 | # get repo client and server cache | |
865 | repopaths = [] |
|
863 | repopaths = [] | |
866 | pwd = ui.environ.get(b'PWD') |
|
864 | pwd = ui.environ.get(b'PWD') | |
867 | if pwd: |
|
865 | if pwd: | |
868 | repopaths.append(pwd) |
|
866 | repopaths.append(pwd) | |
869 |
|
867 | |||
870 | repopaths.extend(args) |
|
868 | repopaths.extend(args) | |
871 | repos = [] |
|
869 | repos = [] | |
872 | for repopath in repopaths: |
|
870 | for repopath in repopaths: | |
873 | try: |
|
871 | try: | |
874 | repo = hg.peer(ui, {}, repopath) |
|
872 | repo = hg.peer(ui, {}, repopath) | |
875 | repos.append(repo) |
|
873 | repos.append(repo) | |
876 |
|
874 | |||
877 | repocache = shallowutil.getcachepath(repo.ui, allowempty=True) |
|
875 | repocache = shallowutil.getcachepath(repo.ui, allowempty=True) | |
878 | if repocache: |
|
876 | if repocache: | |
879 | cachepaths.add(repocache) |
|
877 | cachepaths.add(repocache) | |
880 | except error.RepoError: |
|
878 | except error.RepoError: | |
881 | pass |
|
879 | pass | |
882 |
|
880 | |||
883 | # gc client cache |
|
881 | # gc client cache | |
884 | for cachepath in cachepaths: |
|
882 | for cachepath in cachepaths: | |
885 | gcclient(ui, cachepath) |
|
883 | gcclient(ui, cachepath) | |
886 |
|
884 | |||
887 | # gc server cache |
|
885 | # gc server cache | |
888 | for repo in repos: |
|
886 | for repo in repos: | |
889 | remotefilelogserver.gcserver(ui, repo._repo) |
|
887 | remotefilelogserver.gcserver(ui, repo._repo) | |
890 |
|
888 | |||
891 |
|
889 | |||
892 | def gcclient(ui, cachepath): |
|
890 | def gcclient(ui, cachepath): | |
893 | # get list of repos that use this cache |
|
891 | # get list of repos that use this cache | |
894 | repospath = os.path.join(cachepath, b'repos') |
|
892 | repospath = os.path.join(cachepath, b'repos') | |
895 | if not os.path.exists(repospath): |
|
893 | if not os.path.exists(repospath): | |
896 | ui.warn(_(b"no known cache at %s\n") % cachepath) |
|
894 | ui.warn(_(b"no known cache at %s\n") % cachepath) | |
897 | return |
|
895 | return | |
898 |
|
896 | |||
899 | reposfile = open(repospath, b'rb') |
|
897 | reposfile = open(repospath, b'rb') | |
900 | repos = {r[:-1] for r in reposfile.readlines()} |
|
898 | repos = {r[:-1] for r in reposfile.readlines()} | |
901 | reposfile.close() |
|
899 | reposfile.close() | |
902 |
|
900 | |||
903 | # build list of useful files |
|
901 | # build list of useful files | |
904 | validrepos = [] |
|
902 | validrepos = [] | |
905 | keepkeys = set() |
|
903 | keepkeys = set() | |
906 |
|
904 | |||
907 | sharedcache = None |
|
905 | sharedcache = None | |
908 | filesrepacked = False |
|
906 | filesrepacked = False | |
909 |
|
907 | |||
910 | count = 0 |
|
908 | count = 0 | |
911 | progress = ui.makeprogress( |
|
909 | progress = ui.makeprogress( | |
912 | _(b"analyzing repositories"), unit=b"repos", total=len(repos) |
|
910 | _(b"analyzing repositories"), unit=b"repos", total=len(repos) | |
913 | ) |
|
911 | ) | |
914 | for path in repos: |
|
912 | for path in repos: | |
915 | progress.update(count) |
|
913 | progress.update(count) | |
916 | count += 1 |
|
914 | count += 1 | |
917 | try: |
|
915 | try: | |
918 | path = ui.expandpath(os.path.normpath(path)) |
|
916 | path = ui.expandpath(os.path.normpath(path)) | |
919 | except TypeError as e: |
|
917 | except TypeError as e: | |
920 | ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e)) |
|
918 | ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e)) | |
921 | traceback.print_exc() |
|
919 | traceback.print_exc() | |
922 | continue |
|
920 | continue | |
923 | try: |
|
921 | try: | |
924 | peer = hg.peer(ui, {}, path) |
|
922 | peer = hg.peer(ui, {}, path) | |
925 | repo = peer._repo |
|
923 | repo = peer._repo | |
926 | except error.RepoError: |
|
924 | except error.RepoError: | |
927 | continue |
|
925 | continue | |
928 |
|
926 | |||
929 | validrepos.append(path) |
|
927 | validrepos.append(path) | |
930 |
|
928 | |||
931 | # Protect against any repo or config changes that have happened since |
|
929 | # Protect against any repo or config changes that have happened since | |
932 | # this repo was added to the repos file. We'd rather this loop succeed |
|
930 | # this repo was added to the repos file. We'd rather this loop succeed | |
933 | # and too much be deleted, than the loop fail and nothing gets deleted. |
|
931 | # and too much be deleted, than the loop fail and nothing gets deleted. | |
934 | if not isenabled(repo): |
|
932 | if not isenabled(repo): | |
935 | continue |
|
933 | continue | |
936 |
|
934 | |||
937 | if not util.safehasattr(repo, b'name'): |
|
935 | if not util.safehasattr(repo, b'name'): | |
938 | ui.warn( |
|
936 | ui.warn( | |
939 | _(b"repo %s is a misconfigured remotefilelog repo\n") % path |
|
937 | _(b"repo %s is a misconfigured remotefilelog repo\n") % path | |
940 | ) |
|
938 | ) | |
941 | continue |
|
939 | continue | |
942 |
|
940 | |||
943 | # If garbage collection on repack and repack on hg gc are enabled |
|
941 | # If garbage collection on repack and repack on hg gc are enabled | |
944 | # then loose files are repacked and garbage collected. |
|
942 | # then loose files are repacked and garbage collected. | |
945 | # Otherwise regular garbage collection is performed. |
|
943 | # Otherwise regular garbage collection is performed. | |
946 | repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc') |
|
944 | repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc') | |
947 | gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack') |
|
945 | gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack') | |
948 | if repackonhggc and gcrepack: |
|
946 | if repackonhggc and gcrepack: | |
949 | try: |
|
947 | try: | |
950 | repackmod.incrementalrepack(repo) |
|
948 | repackmod.incrementalrepack(repo) | |
951 | filesrepacked = True |
|
949 | filesrepacked = True | |
952 | continue |
|
950 | continue | |
953 | except (IOError, repackmod.RepackAlreadyRunning): |
|
951 | except (IOError, repackmod.RepackAlreadyRunning): | |
954 | # If repack cannot be performed due to insufficient disk space |
|
952 | # If repack cannot be performed due to insufficient disk space | |
955 | # continue doing garbage collection of loose files w/o repack |
|
953 | # continue doing garbage collection of loose files w/o repack | |
956 | pass |
|
954 | pass | |
957 |
|
955 | |||
958 | reponame = repo.name |
|
956 | reponame = repo.name | |
959 | if not sharedcache: |
|
957 | if not sharedcache: | |
960 | sharedcache = repo.sharedstore |
|
958 | sharedcache = repo.sharedstore | |
961 |
|
959 | |||
962 | # Compute a keepset which is not garbage collected |
|
960 | # Compute a keepset which is not garbage collected | |
963 | def keyfn(fname, fnode): |
|
961 | def keyfn(fname, fnode): | |
964 | return fileserverclient.getcachekey(reponame, fname, hex(fnode)) |
|
962 | return fileserverclient.getcachekey(reponame, fname, hex(fnode)) | |
965 |
|
963 | |||
966 | keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys) |
|
964 | keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys) | |
967 |
|
965 | |||
968 | progress.complete() |
|
966 | progress.complete() | |
969 |
|
967 | |||
970 | # write list of valid repos back |
|
968 | # write list of valid repos back | |
971 | oldumask = os.umask(0o002) |
|
969 | oldumask = os.umask(0o002) | |
972 | try: |
|
970 | try: | |
973 | reposfile = open(repospath, b'wb') |
|
971 | reposfile = open(repospath, b'wb') | |
974 | reposfile.writelines([(b"%s\n" % r) for r in validrepos]) |
|
972 | reposfile.writelines([(b"%s\n" % r) for r in validrepos]) | |
975 | reposfile.close() |
|
973 | reposfile.close() | |
976 | finally: |
|
974 | finally: | |
977 | os.umask(oldumask) |
|
975 | os.umask(oldumask) | |
978 |
|
976 | |||
979 | # prune cache |
|
977 | # prune cache | |
980 | if sharedcache is not None: |
|
978 | if sharedcache is not None: | |
981 | sharedcache.gc(keepkeys) |
|
979 | sharedcache.gc(keepkeys) | |
982 | elif not filesrepacked: |
|
980 | elif not filesrepacked: | |
983 | ui.warn(_(b"warning: no valid repos in repofile\n")) |
|
981 | ui.warn(_(b"warning: no valid repos in repofile\n")) | |
984 |
|
982 | |||
985 |
|
983 | |||
986 | def log(orig, ui, repo, *pats, **opts): |
|
984 | def log(orig, ui, repo, *pats, **opts): | |
987 | if not isenabled(repo): |
|
985 | if not isenabled(repo): | |
988 | return orig(ui, repo, *pats, **opts) |
|
986 | return orig(ui, repo, *pats, **opts) | |
989 |
|
987 | |||
990 | follow = opts.get('follow') |
|
988 | follow = opts.get('follow') | |
991 | revs = opts.get('rev') |
|
989 | revs = opts.get('rev') | |
992 | if pats: |
|
990 | if pats: | |
993 | # Force slowpath for non-follow patterns and follows that start from |
|
991 | # Force slowpath for non-follow patterns and follows that start from | |
994 | # non-working-copy-parent revs. |
|
992 | # non-working-copy-parent revs. | |
995 | if not follow or revs: |
|
993 | if not follow or revs: | |
996 | # This forces the slowpath |
|
994 | # This forces the slowpath | |
997 | opts['removed'] = True |
|
995 | opts['removed'] = True | |
998 |
|
996 | |||
999 | # If this is a non-follow log without any revs specified, recommend that |
|
997 | # If this is a non-follow log without any revs specified, recommend that | |
1000 | # the user add -f to speed it up. |
|
998 | # the user add -f to speed it up. | |
1001 | if not follow and not revs: |
|
999 | if not follow and not revs: | |
1002 | match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts)) |
|
1000 | match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts)) | |
1003 | isfile = not match.anypats() |
|
1001 | isfile = not match.anypats() | |
1004 | if isfile: |
|
1002 | if isfile: | |
1005 | for file in match.files(): |
|
1003 | for file in match.files(): | |
1006 | if not os.path.isfile(repo.wjoin(file)): |
|
1004 | if not os.path.isfile(repo.wjoin(file)): | |
1007 | isfile = False |
|
1005 | isfile = False | |
1008 | break |
|
1006 | break | |
1009 |
|
1007 | |||
1010 | if isfile: |
|
1008 | if isfile: | |
1011 | ui.warn( |
|
1009 | ui.warn( | |
1012 | _( |
|
1010 | _( | |
1013 | b"warning: file log can be slow on large repos - " |
|
1011 | b"warning: file log can be slow on large repos - " | |
1014 | + b"use -f to speed it up\n" |
|
1012 | + b"use -f to speed it up\n" | |
1015 | ) |
|
1013 | ) | |
1016 | ) |
|
1014 | ) | |
1017 |
|
1015 | |||
1018 | return orig(ui, repo, *pats, **opts) |
|
1016 | return orig(ui, repo, *pats, **opts) | |
1019 |
|
1017 | |||
1020 |
|
1018 | |||
1021 | def revdatelimit(ui, revset): |
|
1019 | def revdatelimit(ui, revset): | |
1022 | """Update revset so that only changesets no older than 'prefetchdays' days |
|
1020 | """Update revset so that only changesets no older than 'prefetchdays' days | |
1023 | are included. The default value is set to 14 days. If 'prefetchdays' is set |
|
1021 | are included. The default value is set to 14 days. If 'prefetchdays' is set | |
1024 | to zero or a negative value, then the date restriction is not applied. |
|
1022 | to zero or a negative value, then the date restriction is not applied. | |
1025 | """ |
|
1023 | """ | |
1026 | days = ui.configint(b'remotefilelog', b'prefetchdays') |
|
1024 | days = ui.configint(b'remotefilelog', b'prefetchdays') | |
1027 | if days > 0: |
|
1025 | if days > 0: | |
1028 | revset = b'(%s) & date(-%s)' % (revset, days) |
|
1026 | revset = b'(%s) & date(-%s)' % (revset, days) | |
1029 | return revset |
|
1027 | return revset | |
1030 |
|
1028 | |||
1031 |
|
1029 | |||
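As a minimal, self-contained sketch of the transformation performed above
(the helper name is hypothetical and the 14-day figure is only the documented
default, not read from any config here):

    def limit_by_days(revset, days):
        # mirror revdatelimit: only add the date() clause for positive values
        if days > 0:
            revset = b'(%s) & date(-%d)' % (revset, days)
        return revset

    assert limit_by_days(b'master', 14) == b'(master) & date(-14)'
    assert limit_by_days(b'master', 0) == b'master'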
1032 | def readytofetch(repo): |
|
1030 | def readytofetch(repo): | |
1033 | """Check that enough time has passed since the last background prefetch. |
|
1031 | """Check that enough time has passed since the last background prefetch. | |
1034 | This only relates to prefetches after operations that change the working |
|
1032 | This only relates to prefetches after operations that change the working | |
1035 | copy parent. Default delay between background prefetches is 2 minutes. |
|
1033 | copy parent. Default delay between background prefetches is 2 minutes. | |
1036 | """ |
|
1034 | """ | |
1037 | timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay') |
|
1035 | timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay') | |
1038 | fname = repo.vfs.join(b'lastprefetch') |
|
1036 | fname = repo.vfs.join(b'lastprefetch') | |
1039 |
|
1037 | |||
1040 | ready = False |
|
1038 | ready = False | |
1041 | with open(fname, b'a'): |
|
1039 | with open(fname, b'a'): | |
1042 | # the with construct above is used to avoid race conditions |
|
1040 | # the with construct above is used to avoid race conditions | |
1043 | modtime = os.path.getmtime(fname) |
|
1041 | modtime = os.path.getmtime(fname) | |
1044 | if (time.time() - modtime) > timeout: |
|
1042 | if (time.time() - modtime) > timeout: | |
1045 | os.utime(fname, None) |
|
1043 | os.utime(fname, None) | |
1046 | ready = True |
|
1044 | ready = True | |
1047 |
|
1045 | |||
1048 | return ready |
|
1046 | return ready | |
1049 |
|
1047 | |||
1050 |
|
1048 | |||
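The same mtime-based throttle can be sketched in isolation; this standalone
snippet (the marker path and timeout are illustrative, not the extension's
defaults) touches a marker file at most once per interval, mirroring how
readytofetch uses .hg/lastprefetch:

    import os
    import time

    def ready(marker, timeout):
        # opening in append mode creates the marker if it is missing
        with open(marker, 'a'):
            modtime = os.path.getmtime(marker)
            if (time.time() - modtime) > timeout:
                os.utime(marker, None)  # reset the clock for the next check
                return True
        return False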
1051 | def wcpprefetch(ui, repo, **kwargs): |
|
1049 | def wcpprefetch(ui, repo, **kwargs): | |
1052 | """Prefetches, in the background, the revisions specified by the bgprefetchrevs revset. |
|
1050 | """Prefetches, in the background, the revisions specified by the bgprefetchrevs revset. | |
1053 | Does background repack if backgroundrepack flag is set in config. |
|
1051 | Does background repack if backgroundrepack flag is set in config. | |
1054 | """ |
|
1052 | """ | |
1055 | shallow = isenabled(repo) |
|
1053 | shallow = isenabled(repo) | |
1056 | bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs') |
|
1054 | bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs') | |
1057 | isready = readytofetch(repo) |
|
1055 | isready = readytofetch(repo) | |
1058 |
|
1056 | |||
1059 | if not (shallow and bgprefetchrevs and isready): |
|
1057 | if not (shallow and bgprefetchrevs and isready): | |
1060 | return |
|
1058 | return | |
1061 |
|
1059 | |||
1062 | bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack') |
|
1060 | bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack') | |
1063 | # update a revset with a date limit |
|
1061 | # update a revset with a date limit | |
1064 | bgprefetchrevs = revdatelimit(ui, bgprefetchrevs) |
|
1062 | bgprefetchrevs = revdatelimit(ui, bgprefetchrevs) | |
1065 |
|
1063 | |||
1066 | def anon(unused_success): |
|
1064 | def anon(unused_success): | |
1067 | if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch: |
|
1065 | if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch: | |
1068 | return |
|
1066 | return | |
1069 | repo.ranprefetch = True |
|
1067 | repo.ranprefetch = True | |
1070 | repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack) |
|
1068 | repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack) | |
1071 |
|
1069 | |||
1072 | repo._afterlock(anon) |
|
1070 | repo._afterlock(anon) | |
1073 |
|
1071 | |||
1074 |
|
1072 | |||
1075 | def pull(orig, ui, repo, *pats, **opts): |
|
1073 | def pull(orig, ui, repo, *pats, **opts): | |
1076 | result = orig(ui, repo, *pats, **opts) |
|
1074 | result = orig(ui, repo, *pats, **opts) | |
1077 |
|
1075 | |||
1078 | if isenabled(repo): |
|
1076 | if isenabled(repo): | |
1079 | # prefetch if it's configured |
|
1077 | # prefetch if it's configured | |
1080 | prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch') |
|
1078 | prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch') | |
1081 | bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack') |
|
1079 | bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack') | |
1082 | bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch') |
|
1080 | bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch') | |
1083 |
|
1081 | |||
1084 | if prefetchrevset: |
|
1082 | if prefetchrevset: | |
1085 | ui.status(_(b"prefetching file contents\n")) |
|
1083 | ui.status(_(b"prefetching file contents\n")) | |
1086 | revs = scmutil.revrange(repo, [prefetchrevset]) |
|
1084 | revs = scmutil.revrange(repo, [prefetchrevset]) | |
1087 | base = repo[b'.'].rev() |
|
1085 | base = repo[b'.'].rev() | |
1088 | if bgprefetch: |
|
1086 | if bgprefetch: | |
1089 | repo.backgroundprefetch(prefetchrevset, repack=bgrepack) |
|
1087 | repo.backgroundprefetch(prefetchrevset, repack=bgrepack) | |
1090 | else: |
|
1088 | else: | |
1091 | repo.prefetch(revs, base=base) |
|
1089 | repo.prefetch(revs, base=base) | |
1092 | if bgrepack: |
|
1090 | if bgrepack: | |
1093 | repackmod.backgroundrepack(repo, incremental=True) |
|
1091 | repackmod.backgroundrepack(repo, incremental=True) | |
1094 | elif bgrepack: |
|
1092 | elif bgrepack: | |
1095 | repackmod.backgroundrepack(repo, incremental=True) |
|
1093 | repackmod.backgroundrepack(repo, incremental=True) | |
1096 |
|
1094 | |||
1097 | return result |
|
1095 | return result | |
1098 |
|
1096 | |||
1099 |
|
1097 | |||
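The pull wrapper above only does extra work when the relevant options are set;
a hypothetical client configuration that would trigger its background prefetch
branch (the revset value is an example only) might be:

    [remotefilelog]
    pullprefetch = master
    backgroundprefetch = True
    backgroundrepack = True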
1100 | def exchangepull(orig, repo, remote, *args, **kwargs): |
|
1098 | def exchangepull(orig, repo, remote, *args, **kwargs): | |
1101 | # Hook into the callstream/getbundle to insert bundle capabilities |
|
1099 | # Hook into the callstream/getbundle to insert bundle capabilities | |
1102 | # during a pull. |
|
1100 | # during a pull. | |
1103 | def localgetbundle( |
|
1101 | def localgetbundle( | |
1104 | orig, source, heads=None, common=None, bundlecaps=None, **kwargs |
|
1102 | orig, source, heads=None, common=None, bundlecaps=None, **kwargs | |
1105 | ): |
|
1103 | ): | |
1106 | if not bundlecaps: |
|
1104 | if not bundlecaps: | |
1107 | bundlecaps = set() |
|
1105 | bundlecaps = set() | |
1108 | bundlecaps.add(constants.BUNDLE2_CAPABLITY) |
|
1106 | bundlecaps.add(constants.BUNDLE2_CAPABLITY) | |
1109 | return orig( |
|
1107 | return orig( | |
1110 | source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs |
|
1108 | source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs | |
1111 | ) |
|
1109 | ) | |
1112 |
|
1110 | |||
1113 | if util.safehasattr(remote, b'_callstream'): |
|
1111 | if util.safehasattr(remote, b'_callstream'): | |
1114 | remote._localrepo = repo |
|
1112 | remote._localrepo = repo | |
1115 | elif util.safehasattr(remote, b'getbundle'): |
|
1113 | elif util.safehasattr(remote, b'getbundle'): | |
1116 | extensions.wrapfunction(remote, b'getbundle', localgetbundle) |
|
1114 | extensions.wrapfunction(remote, b'getbundle', localgetbundle) | |
1117 |
|
1115 | |||
1118 | return orig(repo, remote, *args, **kwargs) |
|
1116 | return orig(repo, remote, *args, **kwargs) | |
1119 |
|
1117 | |||
1120 |
|
1118 | |||
1121 | def _fileprefetchhook(repo, revmatches): |
|
1119 | def _fileprefetchhook(repo, revmatches): | |
1122 | if isenabled(repo): |
|
1120 | if isenabled(repo): | |
1123 | allfiles = [] |
|
1121 | allfiles = [] | |
1124 | for rev, match in revmatches: |
|
1122 | for rev, match in revmatches: | |
1125 | if rev == nodemod.wdirrev or rev is None: |
|
1123 | if rev == nodemod.wdirrev or rev is None: | |
1126 | continue |
|
1124 | continue | |
1127 | ctx = repo[rev] |
|
1125 | ctx = repo[rev] | |
1128 | mf = ctx.manifest() |
|
1126 | mf = ctx.manifest() | |
1129 | sparsematch = repo.maybesparsematch(ctx.rev()) |
|
1127 | sparsematch = repo.maybesparsematch(ctx.rev()) | |
1130 | for path in ctx.walk(match): |
|
1128 | for path in ctx.walk(match): | |
1131 | if (not sparsematch or sparsematch(path)) and path in mf: |
|
1129 | if (not sparsematch or sparsematch(path)) and path in mf: | |
1132 | allfiles.append((path, hex(mf[path]))) |
|
1130 | allfiles.append((path, hex(mf[path]))) | |
1133 | repo.fileservice.prefetch(allfiles) |
|
1131 | repo.fileservice.prefetch(allfiles) | |
1134 |
|
1132 | |||
1135 |
|
1133 | |||
1136 | @command( |
|
1134 | @command( | |
1137 | b'debugremotefilelog', |
|
1135 | b'debugremotefilelog', | |
1138 | [(b'd', b'decompress', None, _(b'decompress the filelog first')),], |
|
1136 | [(b'd', b'decompress', None, _(b'decompress the filelog first')),], | |
1139 | _(b'hg debugremotefilelog <path>'), |
|
1137 | _(b'hg debugremotefilelog <path>'), | |
1140 | norepo=True, |
|
1138 | norepo=True, | |
1141 | ) |
|
1139 | ) | |
1142 | def debugremotefilelog(ui, path, **opts): |
|
1140 | def debugremotefilelog(ui, path, **opts): | |
1143 | return debugcommands.debugremotefilelog(ui, path, **opts) |
|
1141 | return debugcommands.debugremotefilelog(ui, path, **opts) | |
1144 |
|
1142 | |||
1145 |
|
1143 | |||
1146 | @command( |
|
1144 | @command( | |
1147 | b'verifyremotefilelog', |
|
1145 | b'verifyremotefilelog', | |
1148 | [(b'd', b'decompress', None, _(b'decompress the filelogs first')),], |
|
1146 | [(b'd', b'decompress', None, _(b'decompress the filelogs first')),], | |
1149 | _(b'hg verifyremotefilelogs <directory>'), |
|
1147 | _(b'hg verifyremotefilelogs <directory>'), | |
1150 | norepo=True, |
|
1148 | norepo=True, | |
1151 | ) |
|
1149 | ) | |
1152 | def verifyremotefilelog(ui, path, **opts): |
|
1150 | def verifyremotefilelog(ui, path, **opts): | |
1153 | return debugcommands.verifyremotefilelog(ui, path, **opts) |
|
1151 | return debugcommands.verifyremotefilelog(ui, path, **opts) | |
1154 |
|
1152 | |||
1155 |
|
1153 | |||
1156 | @command( |
|
1154 | @command( | |
1157 | b'debugdatapack', |
|
1155 | b'debugdatapack', | |
1158 | [ |
|
1156 | [ | |
1159 | (b'', b'long', None, _(b'print the long hashes')), |
|
1157 | (b'', b'long', None, _(b'print the long hashes')), | |
1160 | (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'), |
|
1158 | (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'), | |
1161 | ], |
|
1159 | ], | |
1162 | _(b'hg debugdatapack <paths>'), |
|
1160 | _(b'hg debugdatapack <paths>'), | |
1163 | norepo=True, |
|
1161 | norepo=True, | |
1164 | ) |
|
1162 | ) | |
1165 | def debugdatapack(ui, *paths, **opts): |
|
1163 | def debugdatapack(ui, *paths, **opts): | |
1166 | return debugcommands.debugdatapack(ui, *paths, **opts) |
|
1164 | return debugcommands.debugdatapack(ui, *paths, **opts) | |
1167 |
|
1165 | |||
1168 |
|
1166 | |||
1169 | @command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True) |
|
1167 | @command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True) | |
1170 | def debughistorypack(ui, path, **opts): |
|
1168 | def debughistorypack(ui, path, **opts): | |
1171 | return debugcommands.debughistorypack(ui, path) |
|
1169 | return debugcommands.debughistorypack(ui, path) | |
1172 |
|
1170 | |||
1173 |
|
1171 | |||
1174 | @command(b'debugkeepset', [], _(b'hg debugkeepset')) |
|
1172 | @command(b'debugkeepset', [], _(b'hg debugkeepset')) | |
1175 | def debugkeepset(ui, repo, **opts): |
|
1173 | def debugkeepset(ui, repo, **opts): | |
1176 | # The command is used to measure keepset computation time |
|
1174 | # The command is used to measure keepset computation time | |
1177 | def keyfn(fname, fnode): |
|
1175 | def keyfn(fname, fnode): | |
1178 | return fileserverclient.getcachekey(repo.name, fname, hex(fnode)) |
|
1176 | return fileserverclient.getcachekey(repo.name, fname, hex(fnode)) | |
1179 |
|
1177 | |||
1180 | repackmod.keepset(repo, keyfn) |
|
1178 | repackmod.keepset(repo, keyfn) | |
1181 | return |
|
1179 | return | |
1182 |
|
1180 | |||
1183 |
|
1181 | |||
1184 | @command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack')) |
|
1182 | @command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack')) | |
1185 | def debugwaitonrepack(ui, repo, **opts): |
|
1183 | def debugwaitonrepack(ui, repo, **opts): | |
1186 | return debugcommands.debugwaitonrepack(repo) |
|
1184 | return debugcommands.debugwaitonrepack(repo) | |
1187 |
|
1185 | |||
1188 |
|
1186 | |||
1189 | @command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch')) |
|
1187 | @command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch')) | |
1190 | def debugwaitonprefetch(ui, repo, **opts): |
|
1188 | def debugwaitonprefetch(ui, repo, **opts): | |
1191 | return debugcommands.debugwaitonprefetch(repo) |
|
1189 | return debugcommands.debugwaitonprefetch(repo) | |
1192 |
|
1190 | |||
1193 |
|
1191 | |||
1194 | def resolveprefetchopts(ui, opts): |
|
1192 | def resolveprefetchopts(ui, opts): | |
1195 | if not opts.get(b'rev'): |
|
1193 | if not opts.get(b'rev'): | |
1196 | revset = [b'.', b'draft()'] |
|
1194 | revset = [b'.', b'draft()'] | |
1197 |
|
1195 | |||
1198 | prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None) |
|
1196 | prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None) | |
1199 | if prefetchrevset: |
|
1197 | if prefetchrevset: | |
1200 | revset.append(b'(%s)' % prefetchrevset) |
|
1198 | revset.append(b'(%s)' % prefetchrevset) | |
1201 | bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None) |
|
1199 | bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None) | |
1202 | if bgprefetchrevs: |
|
1200 | if bgprefetchrevs: | |
1203 | revset.append(b'(%s)' % bgprefetchrevs) |
|
1201 | revset.append(b'(%s)' % bgprefetchrevs) | |
1204 | revset = b'+'.join(revset) |
|
1202 | revset = b'+'.join(revset) | |
1205 |
|
1203 | |||
1206 | # update a revset with a date limit |
|
1204 | # update a revset with a date limit | |
1207 | revset = revdatelimit(ui, revset) |
|
1205 | revset = revdatelimit(ui, revset) | |
1208 |
|
1206 | |||
1209 | opts[b'rev'] = [revset] |
|
1207 | opts[b'rev'] = [revset] | |
1210 |
|
1208 | |||
1211 | if not opts.get(b'base'): |
|
1209 | if not opts.get(b'base'): | |
1212 | opts[b'base'] = None |
|
1210 | opts[b'base'] = None | |
1213 |
|
1211 | |||
1214 | return opts |
|
1212 | return opts | |
1215 |
|
1213 | |||
1216 |
|
1214 | |||
1217 | @command( |
|
1215 | @command( | |
1218 | b'prefetch', |
|
1216 | b'prefetch', | |
1219 | [ |
|
1217 | [ | |
1220 | (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')), |
|
1218 | (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')), | |
1221 | (b'', b'repack', False, _(b'run repack after prefetch')), |
|
1219 | (b'', b'repack', False, _(b'run repack after prefetch')), | |
1222 | (b'b', b'base', b'', _(b"rev that is assumed to already be local")), |
|
1220 | (b'b', b'base', b'', _(b"rev that is assumed to already be local")), | |
1223 | ] |
|
1221 | ] | |
1224 | + commands.walkopts, |
|
1222 | + commands.walkopts, | |
1225 | _(b'hg prefetch [OPTIONS] [FILE...]'), |
|
1223 | _(b'hg prefetch [OPTIONS] [FILE...]'), | |
1226 | helpcategory=command.CATEGORY_MAINTENANCE, |
|
1224 | helpcategory=command.CATEGORY_MAINTENANCE, | |
1227 | ) |
|
1225 | ) | |
1228 | def prefetch(ui, repo, *pats, **opts): |
|
1226 | def prefetch(ui, repo, *pats, **opts): | |
1229 | """prefetch file revisions from the server |
|
1227 | """prefetch file revisions from the server | |
1230 |
|
1228 | |||
1231 | Prefetches file revisions for the specified revs and stores them in the |
|
1229 | Prefetches file revisions for the specified revs and stores them in the | |
1232 | local remotefilelog cache. If no rev is specified, the default rev is |
|
1230 | local remotefilelog cache. If no rev is specified, the default rev is | |
1233 | used, which is the union of dot, draft, pullprefetch and bgprefetchrevs. |
|
1231 | used, which is the union of dot, draft, pullprefetch and bgprefetchrevs. | |
1234 | File names or patterns can be used to limit which files are downloaded. |
|
1232 | File names or patterns can be used to limit which files are downloaded. | |
1235 |
|
1233 | |||
1236 | Return 0 on success. |
|
1234 | Return 0 on success. | |
1237 | """ |
|
1235 | """ | |
1238 | opts = pycompat.byteskwargs(opts) |
|
1236 | opts = pycompat.byteskwargs(opts) | |
1239 | if not isenabled(repo): |
|
1237 | if not isenabled(repo): | |
1240 | raise error.Abort(_(b"repo is not shallow")) |
|
1238 | raise error.Abort(_(b"repo is not shallow")) | |
1241 |
|
1239 | |||
1242 | opts = resolveprefetchopts(ui, opts) |
|
1240 | opts = resolveprefetchopts(ui, opts) | |
1243 | revs = scmutil.revrange(repo, opts.get(b'rev')) |
|
1241 | revs = scmutil.revrange(repo, opts.get(b'rev')) | |
1244 | repo.prefetch(revs, opts.get(b'base'), pats, opts) |
|
1242 | repo.prefetch(revs, opts.get(b'base'), pats, opts) | |
1245 |
|
1243 | |||
1246 | # Run repack in background |
|
1244 | # Run repack in background | |
1247 | if opts.get(b'repack'): |
|
1245 | if opts.get(b'repack'): | |
1248 | repackmod.backgroundrepack(repo, incremental=True) |
|
1246 | repackmod.backgroundrepack(repo, incremental=True) | |
1249 |
|
1247 | |||
1250 |
|
1248 | |||
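Given the option table above, illustrative invocations (revisions and paths
are placeholders) would be:

    hg prefetch -r 'draft()' --repack
    hg prefetch -r tip some/file.py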
1251 | @command( |
|
1249 | @command( | |
1252 | b'repack', |
|
1250 | b'repack', | |
1253 | [ |
|
1251 | [ | |
1254 | (b'', b'background', None, _(b'run in a background process'), None), |
|
1252 | (b'', b'background', None, _(b'run in a background process'), None), | |
1255 | (b'', b'incremental', None, _(b'do an incremental repack'), None), |
|
1253 | (b'', b'incremental', None, _(b'do an incremental repack'), None), | |
1256 | ( |
|
1254 | ( | |
1257 | b'', |
|
1255 | b'', | |
1258 | b'packsonly', |
|
1256 | b'packsonly', | |
1259 | None, |
|
1257 | None, | |
1260 | _(b'only repack packs (skip loose objects)'), |
|
1258 | _(b'only repack packs (skip loose objects)'), | |
1261 | None, |
|
1259 | None, | |
1262 | ), |
|
1260 | ), | |
1263 | ], |
|
1261 | ], | |
1264 | _(b'hg repack [OPTIONS]'), |
|
1262 | _(b'hg repack [OPTIONS]'), | |
1265 | ) |
|
1263 | ) | |
1266 | def repack_(ui, repo, *pats, **opts): |
|
1264 | def repack_(ui, repo, *pats, **opts): | |
1267 | if opts.get('background'): |
|
1265 | if opts.get('background'): | |
1268 | repackmod.backgroundrepack( |
|
1266 | repackmod.backgroundrepack( | |
1269 | repo, |
|
1267 | repo, | |
1270 | incremental=opts.get('incremental'), |
|
1268 | incremental=opts.get('incremental'), | |
1271 | packsonly=opts.get('packsonly', False), |
|
1269 | packsonly=opts.get('packsonly', False), | |
1272 | ) |
|
1270 | ) | |
1273 | return |
|
1271 | return | |
1274 |
|
1272 | |||
1275 | options = {b'packsonly': opts.get('packsonly')} |
|
1273 | options = {b'packsonly': opts.get('packsonly')} | |
1276 |
|
1274 | |||
1277 | try: |
|
1275 | try: | |
1278 | if opts.get('incremental'): |
|
1276 | if opts.get('incremental'): | |
1279 | repackmod.incrementalrepack(repo, options=options) |
|
1277 | repackmod.incrementalrepack(repo, options=options) | |
1280 | else: |
|
1278 | else: | |
1281 | repackmod.fullrepack(repo, options=options) |
|
1279 | repackmod.fullrepack(repo, options=options) | |
1282 | except repackmod.RepackAlreadyRunning as ex: |
|
1280 | except repackmod.RepackAlreadyRunning as ex: | |
1283 | # Don't propagate the exception if the repack is already in |
|
1281 | # Don't propagate the exception if the repack is already in | |
1284 | # progress, since we want the command to exit 0. |
|
1282 | # progress, since we want the command to exit 0. | |
1285 | repo.ui.warn(b'%s\n' % ex) |
|
1283 | repo.ui.warn(b'%s\n' % ex) |
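For completeness, the repack command registered above is driven the same way;
for example (illustrative only):

    hg repack --incremental
    hg repack --background --packsonly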
@@ -1,2159 +1,2184 b'' | |||||
1 | # merge.py - directory-level update/merge handling for Mercurial |
|
1 | # merge.py - directory-level update/merge handling for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import errno |
|
10 | import errno | |
11 | import stat |
|
11 | import stat | |
12 | import struct |
|
12 | import struct | |
13 |
|
13 | |||
14 | from .i18n import _ |
|
14 | from .i18n import _ | |
15 | from .node import ( |
|
15 | from .node import ( | |
16 | addednodeid, |
|
16 | addednodeid, | |
17 | modifiednodeid, |
|
17 | modifiednodeid, | |
18 | nullid, |
|
18 | nullid, | |
19 | nullrev, |
|
19 | nullrev, | |
20 | ) |
|
20 | ) | |
21 | from .thirdparty import attr |
|
21 | from .thirdparty import attr | |
22 | from . import ( |
|
22 | from . import ( | |
23 | copies, |
|
23 | copies, | |
24 | encoding, |
|
24 | encoding, | |
25 | error, |
|
25 | error, | |
26 | filemerge, |
|
26 | filemerge, | |
27 | match as matchmod, |
|
27 | match as matchmod, | |
28 | mergestate as mergestatemod, |
|
28 | mergestate as mergestatemod, | |
29 | obsutil, |
|
29 | obsutil, | |
30 | pathutil, |
|
30 | pathutil, | |
31 | pycompat, |
|
31 | pycompat, | |
32 | scmutil, |
|
32 | scmutil, | |
33 | subrepoutil, |
|
33 | subrepoutil, | |
34 | util, |
|
34 | util, | |
35 | worker, |
|
35 | worker, | |
36 | ) |
|
36 | ) | |
37 |
|
37 | |||
38 | _pack = struct.pack |
|
38 | _pack = struct.pack | |
39 | _unpack = struct.unpack |
|
39 | _unpack = struct.unpack | |
40 |
|
40 | |||
41 |
|
41 | |||
42 | def _getcheckunknownconfig(repo, section, name): |
|
42 | def _getcheckunknownconfig(repo, section, name): | |
43 | config = repo.ui.config(section, name) |
|
43 | config = repo.ui.config(section, name) | |
44 | valid = [b'abort', b'ignore', b'warn'] |
|
44 | valid = [b'abort', b'ignore', b'warn'] | |
45 | if config not in valid: |
|
45 | if config not in valid: | |
46 | validstr = b', '.join([b"'" + v + b"'" for v in valid]) |
|
46 | validstr = b', '.join([b"'" + v + b"'" for v in valid]) | |
47 | raise error.ConfigError( |
|
47 | raise error.ConfigError( | |
48 | _(b"%s.%s not valid ('%s' is none of %s)") |
|
48 | _(b"%s.%s not valid ('%s' is none of %s)") | |
49 | % (section, name, config, validstr) |
|
49 | % (section, name, config, validstr) | |
50 | ) |
|
50 | ) | |
51 | return config |
|
51 | return config | |
52 |
|
52 | |||
53 |
|
53 | |||
54 | def _checkunknownfile(repo, wctx, mctx, f, f2=None): |
|
54 | def _checkunknownfile(repo, wctx, mctx, f, f2=None): | |
55 | if wctx.isinmemory(): |
|
55 | if wctx.isinmemory(): | |
56 | # Nothing to do in IMM because nothing in the "working copy" can be an |
|
56 | # Nothing to do in IMM because nothing in the "working copy" can be an | |
57 | # unknown file. |
|
57 | # unknown file. | |
58 | # |
|
58 | # | |
59 | # Note that we should bail out here, not in ``_checkunknownfiles()``, |
|
59 | # Note that we should bail out here, not in ``_checkunknownfiles()``, | |
60 | # because that function does other useful work. |
|
60 | # because that function does other useful work. | |
61 | return False |
|
61 | return False | |
62 |
|
62 | |||
63 | if f2 is None: |
|
63 | if f2 is None: | |
64 | f2 = f |
|
64 | f2 = f | |
65 | return ( |
|
65 | return ( | |
66 | repo.wvfs.audit.check(f) |
|
66 | repo.wvfs.audit.check(f) | |
67 | and repo.wvfs.isfileorlink(f) |
|
67 | and repo.wvfs.isfileorlink(f) | |
68 | and repo.dirstate.normalize(f) not in repo.dirstate |
|
68 | and repo.dirstate.normalize(f) not in repo.dirstate | |
69 | and mctx[f2].cmp(wctx[f]) |
|
69 | and mctx[f2].cmp(wctx[f]) | |
70 | ) |
|
70 | ) | |
71 |
|
71 | |||
72 |
|
72 | |||
73 | class _unknowndirschecker(object): |
|
73 | class _unknowndirschecker(object): | |
74 | """ |
|
74 | """ | |
75 | Look for any unknown files or directories that may have a path conflict |
|
75 | Look for any unknown files or directories that may have a path conflict | |
76 | with a file. If any path prefix of the file exists as a file or link, |
|
76 | with a file. If any path prefix of the file exists as a file or link, | |
77 | then it conflicts. If the file itself is a directory that contains any |
|
77 | then it conflicts. If the file itself is a directory that contains any | |
78 | file that is not tracked, then it conflicts. |
|
78 | file that is not tracked, then it conflicts. | |
79 |
|
79 | |||
80 | Returns the shortest path at which a conflict occurs, or None if there is |
|
80 | Returns the shortest path at which a conflict occurs, or None if there is | |
81 | no conflict. |
|
81 | no conflict. | |
82 | """ |
|
82 | """ | |
83 |
|
83 | |||
84 | def __init__(self): |
|
84 | def __init__(self): | |
85 | # A set of paths known to be good. This prevents repeated checking of |
|
85 | # A set of paths known to be good. This prevents repeated checking of | |
86 | # dirs. It will be updated with any new dirs that are checked and found |
|
86 | # dirs. It will be updated with any new dirs that are checked and found | |
87 | # to be safe. |
|
87 | # to be safe. | |
88 | self._unknowndircache = set() |
|
88 | self._unknowndircache = set() | |
89 |
|
89 | |||
90 | # A set of paths that are known to be absent. This prevents repeated |
|
90 | # A set of paths that are known to be absent. This prevents repeated | |
91 | # checking of subdirectories that are known not to exist. It will be |
|
91 | # checking of subdirectories that are known not to exist. It will be | |
92 | # updated with any new dirs that are checked and found to be absent. |
|
92 | # updated with any new dirs that are checked and found to be absent. | |
93 | self._missingdircache = set() |
|
93 | self._missingdircache = set() | |
94 |
|
94 | |||
95 | def __call__(self, repo, wctx, f): |
|
95 | def __call__(self, repo, wctx, f): | |
96 | if wctx.isinmemory(): |
|
96 | if wctx.isinmemory(): | |
97 | # Nothing to do in IMM for the same reason as ``_checkunknownfile``. |
|
97 | # Nothing to do in IMM for the same reason as ``_checkunknownfile``. | |
98 | return False |
|
98 | return False | |
99 |
|
99 | |||
100 | # Check for path prefixes that exist as unknown files. |
|
100 | # Check for path prefixes that exist as unknown files. | |
101 | for p in reversed(list(pathutil.finddirs(f))): |
|
101 | for p in reversed(list(pathutil.finddirs(f))): | |
102 | if p in self._missingdircache: |
|
102 | if p in self._missingdircache: | |
103 | return |
|
103 | return | |
104 | if p in self._unknowndircache: |
|
104 | if p in self._unknowndircache: | |
105 | continue |
|
105 | continue | |
106 | if repo.wvfs.audit.check(p): |
|
106 | if repo.wvfs.audit.check(p): | |
107 | if ( |
|
107 | if ( | |
108 | repo.wvfs.isfileorlink(p) |
|
108 | repo.wvfs.isfileorlink(p) | |
109 | and repo.dirstate.normalize(p) not in repo.dirstate |
|
109 | and repo.dirstate.normalize(p) not in repo.dirstate | |
110 | ): |
|
110 | ): | |
111 | return p |
|
111 | return p | |
112 | if not repo.wvfs.lexists(p): |
|
112 | if not repo.wvfs.lexists(p): | |
113 | self._missingdircache.add(p) |
|
113 | self._missingdircache.add(p) | |
114 | return |
|
114 | return | |
115 | self._unknowndircache.add(p) |
|
115 | self._unknowndircache.add(p) | |
116 |
|
116 | |||
117 | # Check if the file conflicts with a directory containing unknown files. |
|
117 | # Check if the file conflicts with a directory containing unknown files. | |
118 | if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f): |
|
118 | if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f): | |
119 | # Does the directory contain any files that are not in the dirstate? |
|
119 | # Does the directory contain any files that are not in the dirstate? | |
120 | for p, dirs, files in repo.wvfs.walk(f): |
|
120 | for p, dirs, files in repo.wvfs.walk(f): | |
121 | for fn in files: |
|
121 | for fn in files: | |
122 | relf = util.pconvert(repo.wvfs.reljoin(p, fn)) |
|
122 | relf = util.pconvert(repo.wvfs.reljoin(p, fn)) | |
123 | relf = repo.dirstate.normalize(relf, isknown=True) |
|
123 | relf = repo.dirstate.normalize(relf, isknown=True) | |
124 | if relf not in repo.dirstate: |
|
124 | if relf not in repo.dirstate: | |
125 | return f |
|
125 | return f | |
126 | return None |
|
126 | return None | |
127 |
|
127 | |||
128 |
|
128 | |||
129 | def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce): |
|
129 | def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce): | |
130 | """ |
|
130 | """ | |
131 | Considers any actions that care about the presence of conflicting unknown |
|
131 | Considers any actions that care about the presence of conflicting unknown | |
132 | files. For some actions, the result is to abort; for others, it is to |
|
132 | files. For some actions, the result is to abort; for others, it is to | |
133 | choose a different action. |
|
133 | choose a different action. | |
134 | """ |
|
134 | """ | |
135 | fileconflicts = set() |
|
135 | fileconflicts = set() | |
136 | pathconflicts = set() |
|
136 | pathconflicts = set() | |
137 | warnconflicts = set() |
|
137 | warnconflicts = set() | |
138 | abortconflicts = set() |
|
138 | abortconflicts = set() | |
139 | unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown') |
|
139 | unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown') | |
140 | ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored') |
|
140 | ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored') | |
141 | pathconfig = repo.ui.configbool( |
|
141 | pathconfig = repo.ui.configbool( | |
142 | b'experimental', b'merge.checkpathconflicts' |
|
142 | b'experimental', b'merge.checkpathconflicts' | |
143 | ) |
|
143 | ) | |
144 | if not force: |
|
144 | if not force: | |
145 |
|
145 | |||
146 | def collectconflicts(conflicts, config): |
|
146 | def collectconflicts(conflicts, config): | |
147 | if config == b'abort': |
|
147 | if config == b'abort': | |
148 | abortconflicts.update(conflicts) |
|
148 | abortconflicts.update(conflicts) | |
149 | elif config == b'warn': |
|
149 | elif config == b'warn': | |
150 | warnconflicts.update(conflicts) |
|
150 | warnconflicts.update(conflicts) | |
151 |
|
151 | |||
152 | checkunknowndirs = _unknowndirschecker() |
|
152 | checkunknowndirs = _unknowndirschecker() | |
153 | for f, (m, args, msg) in pycompat.iteritems(actions): |
|
153 | for f, (m, args, msg) in pycompat.iteritems(actions): | |
154 | if m in ( |
|
154 | if m in ( | |
155 | mergestatemod.ACTION_CREATED, |
|
155 | mergestatemod.ACTION_CREATED, | |
156 | mergestatemod.ACTION_DELETED_CHANGED, |
|
156 | mergestatemod.ACTION_DELETED_CHANGED, | |
157 | ): |
|
157 | ): | |
158 | if _checkunknownfile(repo, wctx, mctx, f): |
|
158 | if _checkunknownfile(repo, wctx, mctx, f): | |
159 | fileconflicts.add(f) |
|
159 | fileconflicts.add(f) | |
160 | elif pathconfig and f not in wctx: |
|
160 | elif pathconfig and f not in wctx: | |
161 | path = checkunknowndirs(repo, wctx, f) |
|
161 | path = checkunknowndirs(repo, wctx, f) | |
162 | if path is not None: |
|
162 | if path is not None: | |
163 | pathconflicts.add(path) |
|
163 | pathconflicts.add(path) | |
164 | elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET: |
|
164 | elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET: | |
165 | if _checkunknownfile(repo, wctx, mctx, f, args[0]): |
|
165 | if _checkunknownfile(repo, wctx, mctx, f, args[0]): | |
166 | fileconflicts.add(f) |
|
166 | fileconflicts.add(f) | |
167 |
|
167 | |||
168 | allconflicts = fileconflicts | pathconflicts |
|
168 | allconflicts = fileconflicts | pathconflicts | |
169 | ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)} |
|
169 | ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)} | |
170 | unknownconflicts = allconflicts - ignoredconflicts |
|
170 | unknownconflicts = allconflicts - ignoredconflicts | |
171 | collectconflicts(ignoredconflicts, ignoredconfig) |
|
171 | collectconflicts(ignoredconflicts, ignoredconfig) | |
172 | collectconflicts(unknownconflicts, unknownconfig) |
|
172 | collectconflicts(unknownconflicts, unknownconfig) | |
173 | else: |
|
173 | else: | |
174 | for f, (m, args, msg) in pycompat.iteritems(actions): |
|
174 | for f, (m, args, msg) in pycompat.iteritems(actions): | |
175 | if m == mergestatemod.ACTION_CREATED_MERGE: |
|
175 | if m == mergestatemod.ACTION_CREATED_MERGE: | |
176 | fl2, anc = args |
|
176 | fl2, anc = args | |
177 | different = _checkunknownfile(repo, wctx, mctx, f) |
|
177 | different = _checkunknownfile(repo, wctx, mctx, f) | |
178 | if repo.dirstate._ignore(f): |
|
178 | if repo.dirstate._ignore(f): | |
179 | config = ignoredconfig |
|
179 | config = ignoredconfig | |
180 | else: |
|
180 | else: | |
181 | config = unknownconfig |
|
181 | config = unknownconfig | |
182 |
|
182 | |||
183 | # The behavior when force is True is described by this table: |
|
183 | # The behavior when force is True is described by this table: | |
184 | # config different mergeforce | action backup |
|
184 | # config different mergeforce | action backup | |
185 | # * n * | get n |
|
185 | # * n * | get n | |
186 | # * y y | merge - |
|
186 | # * y y | merge - | |
187 | # abort y n | merge - (1) |
|
187 | # abort y n | merge - (1) | |
188 | # warn y n | warn + get y |
|
188 | # warn y n | warn + get y | |
189 | # ignore y n | get y |
|
189 | # ignore y n | get y | |
190 | # |
|
190 | # | |
191 | # (1) this is probably the wrong behavior here -- we should |
|
191 | # (1) this is probably the wrong behavior here -- we should | |
192 | # probably abort, but some actions like rebases currently |
|
192 | # probably abort, but some actions like rebases currently | |
193 | # don't like an abort happening in the middle of |
|
193 | # don't like an abort happening in the middle of | |
194 | # merge.update. |
|
194 | # merge.update. | |
195 | if not different: |
|
195 | if not different: | |
196 | actions[f] = ( |
|
196 | actions[f] = ( | |
197 | mergestatemod.ACTION_GET, |
|
197 | mergestatemod.ACTION_GET, | |
198 | (fl2, False), |
|
198 | (fl2, False), | |
199 | b'remote created', |
|
199 | b'remote created', | |
200 | ) |
|
200 | ) | |
201 | elif mergeforce or config == b'abort': |
|
201 | elif mergeforce or config == b'abort': | |
202 | actions[f] = ( |
|
202 | actions[f] = ( | |
203 | mergestatemod.ACTION_MERGE, |
|
203 | mergestatemod.ACTION_MERGE, | |
204 | (f, f, None, False, anc), |
|
204 | (f, f, None, False, anc), | |
205 | b'remote differs from untracked local', |
|
205 | b'remote differs from untracked local', | |
206 | ) |
|
206 | ) | |
207 | elif config == b'abort': |
|
207 | elif config == b'abort': | |
208 | abortconflicts.add(f) |
|
208 | abortconflicts.add(f) | |
209 | else: |
|
209 | else: | |
210 | if config == b'warn': |
|
210 | if config == b'warn': | |
211 | warnconflicts.add(f) |
|
211 | warnconflicts.add(f) | |
212 | actions[f] = ( |
|
212 | actions[f] = ( | |
213 | mergestatemod.ACTION_GET, |
|
213 | mergestatemod.ACTION_GET, | |
214 | (fl2, True), |
|
214 | (fl2, True), | |
215 | b'remote created', |
|
215 | b'remote created', | |
216 | ) |
|
216 | ) | |
217 |
|
217 | |||
218 | for f in sorted(abortconflicts): |
|
218 | for f in sorted(abortconflicts): | |
219 | warn = repo.ui.warn |
|
219 | warn = repo.ui.warn | |
220 | if f in pathconflicts: |
|
220 | if f in pathconflicts: | |
221 | if repo.wvfs.isfileorlink(f): |
|
221 | if repo.wvfs.isfileorlink(f): | |
222 | warn(_(b"%s: untracked file conflicts with directory\n") % f) |
|
222 | warn(_(b"%s: untracked file conflicts with directory\n") % f) | |
223 | else: |
|
223 | else: | |
224 | warn(_(b"%s: untracked directory conflicts with file\n") % f) |
|
224 | warn(_(b"%s: untracked directory conflicts with file\n") % f) | |
225 | else: |
|
225 | else: | |
226 | warn(_(b"%s: untracked file differs\n") % f) |
|
226 | warn(_(b"%s: untracked file differs\n") % f) | |
227 | if abortconflicts: |
|
227 | if abortconflicts: | |
228 | raise error.Abort( |
|
228 | raise error.Abort( | |
229 | _( |
|
229 | _( | |
230 | b"untracked files in working directory " |
|
230 | b"untracked files in working directory " | |
231 | b"differ from files in requested revision" |
|
231 | b"differ from files in requested revision" | |
232 | ) |
|
232 | ) | |
233 | ) |
|
233 | ) | |
234 |
|
234 | |||
235 | for f in sorted(warnconflicts): |
|
235 | for f in sorted(warnconflicts): | |
236 | if repo.wvfs.isfileorlink(f): |
|
236 | if repo.wvfs.isfileorlink(f): | |
237 | repo.ui.warn(_(b"%s: replacing untracked file\n") % f) |
|
237 | repo.ui.warn(_(b"%s: replacing untracked file\n") % f) | |
238 | else: |
|
238 | else: | |
239 | repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f) |
|
239 | repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f) | |
240 |
|
240 | |||
241 | for f, (m, args, msg) in pycompat.iteritems(actions): |
|
241 | for f, (m, args, msg) in pycompat.iteritems(actions): | |
242 | if m == mergestatemod.ACTION_CREATED: |
|
242 | if m == mergestatemod.ACTION_CREATED: | |
243 | backup = ( |
|
243 | backup = ( | |
244 | f in fileconflicts |
|
244 | f in fileconflicts | |
245 | or f in pathconflicts |
|
245 | or f in pathconflicts | |
246 | or any(p in pathconflicts for p in pathutil.finddirs(f)) |
|
246 | or any(p in pathconflicts for p in pathutil.finddirs(f)) | |
247 | ) |
|
247 | ) | |
248 | (flags,) = args |
|
248 | (flags,) = args | |
249 | actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg) |
|
249 | actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg) | |
250 |
|
250 | |||
251 |
|
251 | |||
252 | def _forgetremoved(wctx, mctx, branchmerge): |
|
252 | def _forgetremoved(wctx, mctx, branchmerge): | |
253 | """ |
|
253 | """ | |
254 | Forget removed files |
|
254 | Forget removed files | |
255 |
|
255 | |||
256 | If we're jumping between revisions (as opposed to merging), and if |
|
256 | If we're jumping between revisions (as opposed to merging), and if | |
257 | neither the working directory nor the target rev has the file, |
|
257 | neither the working directory nor the target rev has the file, | |
258 | then we need to remove it from the dirstate, to prevent the |
|
258 | then we need to remove it from the dirstate, to prevent the | |
259 | dirstate from listing the file when it is no longer in the |
|
259 | dirstate from listing the file when it is no longer in the | |
260 | manifest. |
|
260 | manifest. | |
261 |
|
261 | |||
262 | If we're merging, and the other revision has removed a file |
|
262 | If we're merging, and the other revision has removed a file | |
263 | that is not present in the working directory, we need to mark it |
|
263 | that is not present in the working directory, we need to mark it | |
264 | as removed. |
|
264 | as removed. | |
265 | """ |
|
265 | """ | |
266 |
|
266 | |||
267 | actions = {} |
|
267 | actions = {} | |
268 | m = mergestatemod.ACTION_FORGET |
|
268 | m = mergestatemod.ACTION_FORGET | |
269 | if branchmerge: |
|
269 | if branchmerge: | |
270 | m = mergestatemod.ACTION_REMOVE |
|
270 | m = mergestatemod.ACTION_REMOVE | |
271 | for f in wctx.deleted(): |
|
271 | for f in wctx.deleted(): | |
272 | if f not in mctx: |
|
272 | if f not in mctx: | |
273 | actions[f] = m, None, b"forget deleted" |
|
273 | actions[f] = m, None, b"forget deleted" | |
274 |
|
274 | |||
275 | if not branchmerge: |
|
275 | if not branchmerge: | |
276 | for f in wctx.removed(): |
|
276 | for f in wctx.removed(): | |
277 | if f not in mctx: |
|
277 | if f not in mctx: | |
278 | actions[f] = ( |
|
278 | actions[f] = ( | |
279 | mergestatemod.ACTION_FORGET, |
|
279 | mergestatemod.ACTION_FORGET, | |
280 | None, |
|
280 | None, | |
281 | b"forget removed", |
|
281 | b"forget removed", | |
282 | ) |
|
282 | ) | |
283 |
|
283 | |||
284 | return actions |
|
284 | return actions | |
285 |
|
285 | |||
286 |
|
286 | |||
287 | def _checkcollision(repo, wmf, actions): |
|
287 | def _checkcollision(repo, wmf, actions): | |
288 | """ |
|
288 | """ | |
289 | Check for case-folding collisions. |
|
289 | Check for case-folding collisions. | |
290 | """ |
|
290 | """ | |
291 | # If the repo is narrowed, filter out files outside the narrowspec. |
|
291 | # If the repo is narrowed, filter out files outside the narrowspec. | |
292 | narrowmatch = repo.narrowmatch() |
|
292 | narrowmatch = repo.narrowmatch() | |
293 | if not narrowmatch.always(): |
|
293 | if not narrowmatch.always(): | |
294 | pmmf = set(wmf.walk(narrowmatch)) |
|
294 | pmmf = set(wmf.walk(narrowmatch)) | |
295 | if actions: |
|
295 | if actions: | |
296 | narrowactions = {} |
|
296 | narrowactions = {} | |
297 | for m, actionsfortype in pycompat.iteritems(actions): |
|
297 | for m, actionsfortype in pycompat.iteritems(actions): | |
298 | narrowactions[m] = [] |
|
298 | narrowactions[m] = [] | |
299 | for (f, args, msg) in actionsfortype: |
|
299 | for (f, args, msg) in actionsfortype: | |
300 | if narrowmatch(f): |
|
300 | if narrowmatch(f): | |
301 | narrowactions[m].append((f, args, msg)) |
|
301 | narrowactions[m].append((f, args, msg)) | |
302 | actions = narrowactions |
|
302 | actions = narrowactions | |
303 | else: |
|
303 | else: | |
304 | # build up the provisional merged manifest |
|
304 | # build up the provisional merged manifest | |
305 | pmmf = set(wmf) |
|
305 | pmmf = set(wmf) | |
306 |
|
306 | |||
307 | if actions: |
|
307 | if actions: | |
308 | # KEEP and EXEC are no-op |
|
308 | # KEEP and EXEC are no-op | |
309 | for m in ( |
|
309 | for m in ( | |
310 | mergestatemod.ACTION_ADD, |
|
310 | mergestatemod.ACTION_ADD, | |
311 | mergestatemod.ACTION_ADD_MODIFIED, |
|
311 | mergestatemod.ACTION_ADD_MODIFIED, | |
312 | mergestatemod.ACTION_FORGET, |
|
312 | mergestatemod.ACTION_FORGET, | |
313 | mergestatemod.ACTION_GET, |
|
313 | mergestatemod.ACTION_GET, | |
314 | mergestatemod.ACTION_CHANGED_DELETED, |
|
314 | mergestatemod.ACTION_CHANGED_DELETED, | |
315 | mergestatemod.ACTION_DELETED_CHANGED, |
|
315 | mergestatemod.ACTION_DELETED_CHANGED, | |
316 | ): |
|
316 | ): | |
317 | for f, args, msg in actions[m]: |
|
317 | for f, args, msg in actions[m]: | |
318 | pmmf.add(f) |
|
318 | pmmf.add(f) | |
319 | for f, args, msg in actions[mergestatemod.ACTION_REMOVE]: |
|
319 | for f, args, msg in actions[mergestatemod.ACTION_REMOVE]: | |
320 | pmmf.discard(f) |
|
320 | pmmf.discard(f) | |
321 | for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]: |
|
321 | for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]: | |
322 | f2, flags = args |
|
322 | f2, flags = args | |
323 | pmmf.discard(f2) |
|
323 | pmmf.discard(f2) | |
324 | pmmf.add(f) |
|
324 | pmmf.add(f) | |
325 | for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]: |
|
325 | for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]: | |
326 | pmmf.add(f) |
|
326 | pmmf.add(f) | |
327 | for f, args, msg in actions[mergestatemod.ACTION_MERGE]: |
|
327 | for f, args, msg in actions[mergestatemod.ACTION_MERGE]: | |
328 | f1, f2, fa, move, anc = args |
|
328 | f1, f2, fa, move, anc = args | |
329 | if move: |
|
329 | if move: | |
330 | pmmf.discard(f1) |
|
330 | pmmf.discard(f1) | |
331 | pmmf.add(f) |
|
331 | pmmf.add(f) | |
332 |
|
332 | |||
333 | # check case-folding collision in provisional merged manifest |
|
333 | # check case-folding collision in provisional merged manifest | |
334 | foldmap = {} |
|
334 | foldmap = {} | |
335 | for f in pmmf: |
|
335 | for f in pmmf: | |
336 | fold = util.normcase(f) |
|
336 | fold = util.normcase(f) | |
337 | if fold in foldmap: |
|
337 | if fold in foldmap: | |
338 | raise error.Abort( |
|
338 | raise error.Abort( | |
339 | _(b"case-folding collision between %s and %s") |
|
339 | _(b"case-folding collision between %s and %s") | |
340 | % (f, foldmap[fold]) |
|
340 | % (f, foldmap[fold]) | |
341 | ) |
|
341 | ) | |
342 | foldmap[fold] = f |
|
342 | foldmap[fold] = f | |
343 |
|
343 | |||
344 | # check case-folding of directories |
|
344 | # check case-folding of directories | |
345 | foldprefix = unfoldprefix = lastfull = b'' |
|
345 | foldprefix = unfoldprefix = lastfull = b'' | |
346 | for fold, f in sorted(foldmap.items()): |
|
346 | for fold, f in sorted(foldmap.items()): | |
347 | if fold.startswith(foldprefix) and not f.startswith(unfoldprefix): |
|
347 | if fold.startswith(foldprefix) and not f.startswith(unfoldprefix): | |
348 | # the folded prefix matches but actual casing is different |
|
348 | # the folded prefix matches but actual casing is different | |
349 | raise error.Abort( |
|
349 | raise error.Abort( | |
350 | _(b"case-folding collision between %s and directory of %s") |
|
350 | _(b"case-folding collision between %s and directory of %s") | |
351 | % (lastfull, f) |
|
351 | % (lastfull, f) | |
352 | ) |
|
352 | ) | |
353 | foldprefix = fold + b'/' |
|
353 | foldprefix = fold + b'/' | |
354 | unfoldprefix = f + b'/' |
|
354 | unfoldprefix = f + b'/' | |
355 | lastfull = f |
|
355 | lastfull = f | |
356 |
|
356 | |||
357 |
|
357 | |||
358 | def driverpreprocess(repo, ms, wctx, labels=None): |
|
358 | def driverpreprocess(repo, ms, wctx, labels=None): | |
359 | """run the preprocess step of the merge driver, if any |
|
359 | """run the preprocess step of the merge driver, if any | |
360 |
|
360 | |||
361 | This is currently not implemented -- it's an extension point.""" |
|
361 | This is currently not implemented -- it's an extension point.""" | |
362 | return True |
|
362 | return True | |
363 |
|
363 | |||
364 |
|
364 | |||
365 | def driverconclude(repo, ms, wctx, labels=None): |
|
365 | def driverconclude(repo, ms, wctx, labels=None): | |
366 | """run the conclude step of the merge driver, if any |
|
366 | """run the conclude step of the merge driver, if any | |
367 |
|
367 | |||
368 | This is currently not implemented -- it's an extension point.""" |
|
368 | This is currently not implemented -- it's an extension point.""" | |
369 | return True |
|
369 | return True | |
370 |
|
370 | |||
371 |
|
371 | |||
372 | def _filesindirs(repo, manifest, dirs): |
|
372 | def _filesindirs(repo, manifest, dirs): | |
373 | """ |
|
373 | """ | |
374 | Generator that yields pairs of all the files in the manifest that are found |
|
374 | Generator that yields pairs of all the files in the manifest that are found | |
375 | inside the directories listed in dirs, and which directory they are found |
|
375 | inside the directories listed in dirs, and which directory they are found | |
376 | in. |
|
376 | in. | |
377 | """ |
|
377 | """ | |
378 | for f in manifest: |
|
378 | for f in manifest: | |
379 | for p in pathutil.finddirs(f): |
|
379 | for p in pathutil.finddirs(f): | |
380 | if p in dirs: |
|
380 | if p in dirs: | |
381 | yield f, p |
|
381 | yield f, p | |
382 | break |
|
382 | break | |
383 |
|
383 | |||
384 |
|
384 | |||
385 | def checkpathconflicts(repo, wctx, mctx, actions): |
|
385 | def checkpathconflicts(repo, wctx, mctx, actions): | |
386 | """ |
|
386 | """ | |
387 | Check if any actions introduce path conflicts in the repository, updating |
|
387 | Check if any actions introduce path conflicts in the repository, updating | |
388 | actions to record or handle the path conflict accordingly. |
|
388 | actions to record or handle the path conflict accordingly. | |
389 | """ |
|
389 | """ | |
390 | mf = wctx.manifest() |
|
390 | mf = wctx.manifest() | |
391 |
|
391 | |||
392 | # The set of local files that conflict with a remote directory. |
|
392 | # The set of local files that conflict with a remote directory. | |
393 | localconflicts = set() |
|
393 | localconflicts = set() | |
394 |
|
394 | |||
395 | # The set of directories that conflict with a remote file, and so may cause |
|
395 | # The set of directories that conflict with a remote file, and so may cause | |
396 | # conflicts if they still contain any files after the merge. |
|
396 | # conflicts if they still contain any files after the merge. | |
397 | remoteconflicts = set() |
|
397 | remoteconflicts = set() | |
398 |
|
398 | |||
399 | # The set of directories that appear as both a file and a directory in the |
|
399 | # The set of directories that appear as both a file and a directory in the | |
400 | # remote manifest. These indicate an invalid remote manifest, which |
|
400 | # remote manifest. These indicate an invalid remote manifest, which | |
401 | # can't be updated to cleanly. |
|
401 | # can't be updated to cleanly. | |
402 | invalidconflicts = set() |
|
402 | invalidconflicts = set() | |
403 |
|
403 | |||
404 | # The set of directories that contain files that are being created. |
|
404 | # The set of directories that contain files that are being created. | |
405 | createdfiledirs = set() |
|
405 | createdfiledirs = set() | |
406 |
|
406 | |||
407 | # The set of files deleted by all the actions. |
|
407 | # The set of files deleted by all the actions. | |
408 | deletedfiles = set() |
|
408 | deletedfiles = set() | |
409 |
|
409 | |||
410 | for f, (m, args, msg) in actions.items(): |
|
410 | for f, (m, args, msg) in actions.items(): | |
411 | if m in ( |
|
411 | if m in ( | |
412 | mergestatemod.ACTION_CREATED, |
|
412 | mergestatemod.ACTION_CREATED, | |
413 | mergestatemod.ACTION_DELETED_CHANGED, |
|
413 | mergestatemod.ACTION_DELETED_CHANGED, | |
414 | mergestatemod.ACTION_MERGE, |
|
414 | mergestatemod.ACTION_MERGE, | |
415 | mergestatemod.ACTION_CREATED_MERGE, |
|
415 | mergestatemod.ACTION_CREATED_MERGE, | |
416 | ): |
|
416 | ): | |
417 | # This action may create a new local file. |
|
417 | # This action may create a new local file. | |
418 | createdfiledirs.update(pathutil.finddirs(f)) |
|
418 | createdfiledirs.update(pathutil.finddirs(f)) | |
419 | if mf.hasdir(f): |
|
419 | if mf.hasdir(f): | |
420 | # The file aliases a local directory. This might be ok if all |
|
420 | # The file aliases a local directory. This might be ok if all | |
421 | # the files in the local directory are being deleted. This |
|
421 | # the files in the local directory are being deleted. This | |
422 | # will be checked once we know what all the deleted files are. |
|
422 | # will be checked once we know what all the deleted files are. | |
423 | remoteconflicts.add(f) |
|
423 | remoteconflicts.add(f) | |
424 | # Track the names of all deleted files. |
|
424 | # Track the names of all deleted files. | |
425 | if m == mergestatemod.ACTION_REMOVE: |
|
425 | if m == mergestatemod.ACTION_REMOVE: | |
426 | deletedfiles.add(f) |
|
426 | deletedfiles.add(f) | |
427 | if m == mergestatemod.ACTION_MERGE: |
|
427 | if m == mergestatemod.ACTION_MERGE: | |
428 | f1, f2, fa, move, anc = args |
|
428 | f1, f2, fa, move, anc = args | |
429 | if move: |
|
429 | if move: | |
430 | deletedfiles.add(f1) |
|
430 | deletedfiles.add(f1) | |
431 | if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL: |
|
431 | if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL: | |
432 | f2, flags = args |
|
432 | f2, flags = args | |
433 | deletedfiles.add(f2) |
|
433 | deletedfiles.add(f2) | |
434 |
|
434 | |||
435 | # Check all directories that contain created files for path conflicts. |
|
435 | # Check all directories that contain created files for path conflicts. | |
436 | for p in createdfiledirs: |
|
436 | for p in createdfiledirs: | |
437 | if p in mf: |
|
437 | if p in mf: | |
438 | if p in mctx: |
|
438 | if p in mctx: | |
439 | # A file is in a directory which aliases both a local |
|
439 | # A file is in a directory which aliases both a local | |
440 | # and a remote file. This is an internal inconsistency |
|
440 | # and a remote file. This is an internal inconsistency | |
441 | # within the remote manifest. |
|
441 | # within the remote manifest. | |
442 | invalidconflicts.add(p) |
|
442 | invalidconflicts.add(p) | |
443 | else: |
|
443 | else: | |
444 | # A file is in a directory which aliases a local file. |
|
444 | # A file is in a directory which aliases a local file. | |
445 | # We will need to rename the local file. |
|
445 | # We will need to rename the local file. | |
446 | localconflicts.add(p) |
|
446 | localconflicts.add(p) | |
447 | if p in actions and actions[p][0] in ( |
|
447 | if p in actions and actions[p][0] in ( | |
448 | mergestatemod.ACTION_CREATED, |
|
448 | mergestatemod.ACTION_CREATED, | |
449 | mergestatemod.ACTION_DELETED_CHANGED, |
|
449 | mergestatemod.ACTION_DELETED_CHANGED, | |
450 | mergestatemod.ACTION_MERGE, |
|
450 | mergestatemod.ACTION_MERGE, | |
451 | mergestatemod.ACTION_CREATED_MERGE, |
|
451 | mergestatemod.ACTION_CREATED_MERGE, | |
452 | ): |
|
452 | ): | |
453 | # The file is in a directory which aliases a remote file. |
|
453 | # The file is in a directory which aliases a remote file. | |
454 | # This is an internal inconsistency within the remote |
|
454 | # This is an internal inconsistency within the remote | |
455 | # manifest. |
|
455 | # manifest. | |
456 | invalidconflicts.add(p) |
|
456 | invalidconflicts.add(p) | |
457 |
|
457 | |||
458 | # Rename all local conflicting files that have not been deleted. |
|
458 | # Rename all local conflicting files that have not been deleted. | |
459 | for p in localconflicts: |
|
459 | for p in localconflicts: | |
460 | if p not in deletedfiles: |
|
460 | if p not in deletedfiles: | |
461 | ctxname = bytes(wctx).rstrip(b'+') |
|
461 | ctxname = bytes(wctx).rstrip(b'+') | |
462 | pnew = util.safename(p, ctxname, wctx, set(actions.keys())) |
|
462 | pnew = util.safename(p, ctxname, wctx, set(actions.keys())) | |
463 | porig = wctx[p].copysource() or p |
|
463 | porig = wctx[p].copysource() or p | |
464 | actions[pnew] = ( |
|
464 | actions[pnew] = ( | |
465 | mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, |
|
465 | mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, | |
466 | (p, porig), |
|
466 | (p, porig), | |
467 | b'local path conflict', |
|
467 | b'local path conflict', | |
468 | ) |
|
468 | ) | |
469 | actions[p] = ( |
|
469 | actions[p] = ( | |
470 | mergestatemod.ACTION_PATH_CONFLICT, |
|
470 | mergestatemod.ACTION_PATH_CONFLICT, | |
471 | (pnew, b'l'), |
|
471 | (pnew, b'l'), | |
472 | b'path conflict', |
|
472 | b'path conflict', | |
473 | ) |
|
473 | ) | |
474 |
|
474 | |||
475 | if remoteconflicts: |
|
475 | if remoteconflicts: | |
476 | # Check if all files in the conflicting directories have been removed. |
|
476 | # Check if all files in the conflicting directories have been removed. | |
477 | ctxname = bytes(mctx).rstrip(b'+') |
|
477 | ctxname = bytes(mctx).rstrip(b'+') | |
478 | for f, p in _filesindirs(repo, mf, remoteconflicts): |
|
478 | for f, p in _filesindirs(repo, mf, remoteconflicts): | |
479 | if f not in deletedfiles: |
|
479 | if f not in deletedfiles: | |
480 | m, args, msg = actions[p] |
|
480 | m, args, msg = actions[p] | |
481 | pnew = util.safename(p, ctxname, wctx, set(actions.keys())) |
|
481 | pnew = util.safename(p, ctxname, wctx, set(actions.keys())) | |
482 | if m in ( |
|
482 | if m in ( | |
483 | mergestatemod.ACTION_DELETED_CHANGED, |
|
483 | mergestatemod.ACTION_DELETED_CHANGED, | |
484 | mergestatemod.ACTION_MERGE, |
|
484 | mergestatemod.ACTION_MERGE, | |
485 | ): |
|
485 | ): | |
486 | # Action was merge, just update target. |
|
486 | # Action was merge, just update target. | |
487 | actions[pnew] = (m, args, msg) |
|
487 | actions[pnew] = (m, args, msg) | |
488 | else: |
|
488 | else: | |
489 | # Action was create, change to renamed get action. |
|
489 | # Action was create, change to renamed get action. | |
490 | fl = args[0] |
|
490 | fl = args[0] | |
491 | actions[pnew] = ( |
|
491 | actions[pnew] = ( | |
492 | mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, |
|
492 | mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, | |
493 | (p, fl), |
|
493 | (p, fl), | |
494 | b'remote path conflict', |
|
494 | b'remote path conflict', | |
495 | ) |
|
495 | ) | |
496 | actions[p] = ( |
|
496 | actions[p] = ( | |
497 | mergestatemod.ACTION_PATH_CONFLICT, |
|
497 | mergestatemod.ACTION_PATH_CONFLICT, | |
498 | (pnew, mergestatemod.ACTION_REMOVE), |
|
498 | (pnew, mergestatemod.ACTION_REMOVE), | |
499 | b'path conflict', |
|
499 | b'path conflict', | |
500 | ) |
|
500 | ) | |
501 | remoteconflicts.remove(p) |
|
501 | remoteconflicts.remove(p) | |
502 | break |
|
502 | break | |
503 |
|
503 | |||
504 | if invalidconflicts: |
|
504 | if invalidconflicts: | |
505 | for p in invalidconflicts: |
|
505 | for p in invalidconflicts: | |
506 | repo.ui.warn(_(b"%s: is both a file and a directory\n") % p) |
|
506 | repo.ui.warn(_(b"%s: is both a file and a directory\n") % p) | |
507 | raise error.Abort(_(b"destination manifest contains path conflicts")) |
|
507 | raise error.Abort(_(b"destination manifest contains path conflicts")) | |
508 |
|
508 | |||
509 |
|
509 | |||
510 | def _filternarrowactions(narrowmatch, branchmerge, actions): |
|
510 | def _filternarrowactions(narrowmatch, branchmerge, actions): | |
511 | """ |
|
511 | """ | |
512 | Filters out actions that can be ignored because the repo is narrowed. |
|
512 | Filters out actions that can be ignored because the repo is narrowed. | |
513 |
|
513 | |||
514 | Raise an exception if the merge cannot be completed because the repo is |
|
514 | Raise an exception if the merge cannot be completed because the repo is | |
515 | narrowed. |
|
515 | narrowed. | |
516 | """ |
|
516 | """ | |
517 | nooptypes = {b'k'} # TODO: handle with nonconflicttypes |
|
517 | nooptypes = {b'k'} # TODO: handle with nonconflicttypes | |
518 | nonconflicttypes = set(b'a am c cm f g gs r e'.split()) |
|
518 | nonconflicttypes = set(b'a am c cm f g gs r e'.split()) | |
519 | # We mutate the items in the dict during iteration, so iterate |
|
519 | # We mutate the items in the dict during iteration, so iterate | |
520 | # over a copy. |
|
520 | # over a copy. | |
521 | for f, action in list(actions.items()): |
|
521 | for f, action in list(actions.items()): | |
522 | if narrowmatch(f): |
|
522 | if narrowmatch(f): | |
523 | pass |
|
523 | pass | |
524 | elif not branchmerge: |
|
524 | elif not branchmerge: | |
525 | del actions[f] # just updating, ignore changes outside clone |
|
525 | del actions[f] # just updating, ignore changes outside clone | |
526 | elif action[0] in nooptypes: |
|
526 | elif action[0] in nooptypes: | |
527 | del actions[f] # merge does not affect file |
|
527 | del actions[f] # merge does not affect file | |
528 | elif action[0] in nonconflicttypes: |
|
528 | elif action[0] in nonconflicttypes: | |
529 | raise error.Abort( |
|
529 | raise error.Abort( | |
530 | _( |
|
530 | _( | |
531 | b'merge affects file \'%s\' outside narrow, ' |
|
531 | b'merge affects file \'%s\' outside narrow, ' | |
532 | b'which is not yet supported' |
|
532 | b'which is not yet supported' | |
533 | ) |
|
533 | ) | |
534 | % f, |
|
534 | % f, | |
535 | hint=_(b'merging in the other direction may work'), |
|
535 | hint=_(b'merging in the other direction may work'), | |
536 | ) |
|
536 | ) | |
537 | else: |
|
537 | else: | |
538 | raise error.Abort( |
|
538 | raise error.Abort( | |
539 | _(b'conflict in file \'%s\' is outside narrow clone') % f |
|
539 | _(b'conflict in file \'%s\' is outside narrow clone') % f | |
540 | ) |
|
540 | ) | |
541 |
|
541 | |||
542 |
|
542 | |||
543 | class mergeresult(object): |
|
543 | class mergeresult(object): | |
544 | '''An object representing the result of merging manifests. |
|
544 | '''An object representing the result of merging manifests. | |
545 |
|
545 | |||
546 | It has information about what actions need to be performed on the |
|
546 | It has information about what actions need to be performed on the | |
547 | dirstate, the mapping of divergent renames, and other such cases. ''' |
|
547 | dirstate, the mapping of divergent renames, and other such cases. ''' | |
548 |
|
548 | |||
549 | def __init__(self, actions, diverge, renamedelete, commitinfo): |
|
549 | def __init__(self, actions, diverge, renamedelete, commitinfo): | |
550 | """ |
|
550 | """ | |
551 | actions: dict with filenames as keys and action-related info as values |
|
551 | actions: dict with filenames as keys and action-related info as values | |
552 | diverge: mapping of source name -> list of dest names for |
|
552 | diverge: mapping of source name -> list of dest names for | |
553 | divergent renames |
|
553 | divergent renames | |
554 | renamedelete: mapping of source name -> list of destinations for files |
|
554 | renamedelete: mapping of source name -> list of destinations for files | |
555 | deleted on one side and renamed on other. |
|
555 | deleted on one side and renamed on other. | |
556 | commitinfo: dict containing data which should be used on commit, |
|
556 | commitinfo: dict containing data which should be used on commit, | |
557 | as a filename -> info mapping |
|
557 | as a filename -> info mapping | |
558 | """ |
|
558 | """ | |
559 |
|
559 | |||
560 | self._actions = actions |
|
560 | self._actions = actions | |
561 | self._diverge = diverge |
|
561 | self._diverge = diverge | |
562 | self._renamedelete = renamedelete |
|
562 | self._renamedelete = renamedelete | |
563 | self._commitinfo = commitinfo |
|
563 | self._commitinfo = commitinfo | |
564 |
|
564 | |||
565 | @property |
|
565 | @property | |
566 | def actions(self): |
|
566 | def actions(self): | |
567 | return self._actions |
|
567 | return self._actions | |
568 |
|
568 | |||
569 | @property |
|
569 | @property | |
570 | def diverge(self): |
|
570 | def diverge(self): | |
571 | return self._diverge |
|
571 | return self._diverge | |
572 |
|
572 | |||
573 | @property |
|
573 | @property | |
574 | def renamedelete(self): |
|
574 | def renamedelete(self): | |
575 | return self._renamedelete |
|
575 | return self._renamedelete | |
576 |
|
576 | |||
577 | @property |
|
577 | @property | |
578 | def commitinfo(self): |
|
578 | def commitinfo(self): | |
579 | return self._commitinfo |
|
579 | return self._commitinfo | |
580 |
|
580 | |||
581 | def setactions(self, actions): |
|
581 | def setactions(self, actions): | |
582 | self._actions = actions |
|
582 | self._actions = actions | |
583 |
|
583 | |||
584 |
|
584 | |||
585 | def manifestmerge( |
|
585 | def manifestmerge( | |
586 | repo, |
|
586 | repo, | |
587 | wctx, |
|
587 | wctx, | |
588 | p2, |
|
588 | p2, | |
589 | pa, |
|
589 | pa, | |
590 | branchmerge, |
|
590 | branchmerge, | |
591 | force, |
|
591 | force, | |
592 | matcher, |
|
592 | matcher, | |
593 | acceptremote, |
|
593 | acceptremote, | |
594 | followcopies, |
|
594 | followcopies, | |
595 | forcefulldiff=False, |
|
595 | forcefulldiff=False, | |
596 | ): |
|
596 | ): | |
597 | """ |
|
597 | """ | |
598 | Merge wctx and p2 with ancestor pa and generate merge action list |
|
598 | Merge wctx and p2 with ancestor pa and generate merge action list | |
599 |
|
599 | |||
600 | branchmerge and force are as passed in to update |
|
600 | branchmerge and force are as passed in to update | |
601 | matcher = matcher to filter file lists |
|
601 | matcher = matcher to filter file lists | |
602 | acceptremote = accept the incoming changes without prompting |
|
602 | acceptremote = accept the incoming changes without prompting | |
603 |
|
603 | |||
604 | Returns an object of the mergeresult class |
|
604 | Returns an object of the mergeresult class | |
605 | """ |
|
605 | """ | |
606 | if matcher is not None and matcher.always(): |
|
606 | if matcher is not None and matcher.always(): | |
607 | matcher = None |
|
607 | matcher = None | |
608 |
|
608 | |||
609 | # manifests fetched in order are going to be faster, so prime the caches |
|
609 | # manifests fetched in order are going to be faster, so prime the caches | |
610 | [ |
|
610 | [ | |
611 | x.manifest() |
|
611 | x.manifest() | |
612 | for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev) |
|
612 | for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev) | |
613 | ] |
|
613 | ] | |
614 |
|
614 | |||
615 | branch_copies1 = copies.branch_copies() |
|
615 | branch_copies1 = copies.branch_copies() | |
616 | branch_copies2 = copies.branch_copies() |
|
616 | branch_copies2 = copies.branch_copies() | |
617 | diverge = {} |
|
617 | diverge = {} | |
618 | # information from merge which is needed at commit time |
|
618 | # information from merge which is needed at commit time | |
619 | # for example, choosing which parent's filelog to commit |
|
619 | # for example, choosing which parent's filelog to commit | |
620 | # TODO: use specific constants in future for this mapping |
|
620 | # TODO: use specific constants in future for this mapping | |
621 | commitinfo = {} |
|
621 | commitinfo = {} | |
622 | if followcopies: |
|
622 | if followcopies: | |
623 | branch_copies1, branch_copies2, diverge = copies.mergecopies( |
|
623 | branch_copies1, branch_copies2, diverge = copies.mergecopies( | |
624 | repo, wctx, p2, pa |
|
624 | repo, wctx, p2, pa | |
625 | ) |
|
625 | ) | |
626 |
|
626 | |||
627 | boolbm = pycompat.bytestr(bool(branchmerge)) |
|
627 | boolbm = pycompat.bytestr(bool(branchmerge)) | |
628 | boolf = pycompat.bytestr(bool(force)) |
|
628 | boolf = pycompat.bytestr(bool(force)) | |
629 | boolm = pycompat.bytestr(bool(matcher)) |
|
629 | boolm = pycompat.bytestr(bool(matcher)) | |
630 | repo.ui.note(_(b"resolving manifests\n")) |
|
630 | repo.ui.note(_(b"resolving manifests\n")) | |
631 | repo.ui.debug( |
|
631 | repo.ui.debug( | |
632 | b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm) |
|
632 | b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm) | |
633 | ) |
|
633 | ) | |
634 | repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2)) |
|
634 | repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2)) | |
635 |
|
635 | |||
636 | m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest() |
|
636 | m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest() | |
637 | copied1 = set(branch_copies1.copy.values()) |
|
637 | copied1 = set(branch_copies1.copy.values()) | |
638 | copied1.update(branch_copies1.movewithdir.values()) |
|
638 | copied1.update(branch_copies1.movewithdir.values()) | |
639 | copied2 = set(branch_copies2.copy.values()) |
|
639 | copied2 = set(branch_copies2.copy.values()) | |
640 | copied2.update(branch_copies2.movewithdir.values()) |
|
640 | copied2.update(branch_copies2.movewithdir.values()) | |
641 |
|
641 | |||
642 | if b'.hgsubstate' in m1 and wctx.rev() is None: |
|
642 | if b'.hgsubstate' in m1 and wctx.rev() is None: | |
643 | # Check whether sub state is modified, and overwrite the manifest |
|
643 | # Check whether sub state is modified, and overwrite the manifest | |
644 | # to flag the change. If wctx is a committed revision, we shouldn't |
|
644 | # to flag the change. If wctx is a committed revision, we shouldn't | |
645 | # care for the dirty state of the working directory. |
|
645 | # care for the dirty state of the working directory. | |
646 | if any(wctx.sub(s).dirty() for s in wctx.substate): |
|
646 | if any(wctx.sub(s).dirty() for s in wctx.substate): | |
647 | m1[b'.hgsubstate'] = modifiednodeid |
|
647 | m1[b'.hgsubstate'] = modifiednodeid | |
648 |
|
648 | |||
649 | # Don't use m2-vs-ma optimization if: |
|
649 | # Don't use m2-vs-ma optimization if: | |
650 | # - ma is the same as m1 or m2, which we're just going to diff again later |
|
650 | # - ma is the same as m1 or m2, which we're just going to diff again later | |
651 | # - The caller specifically asks for a full diff, which is useful during bid |
|
651 | # - The caller specifically asks for a full diff, which is useful during bid | |
652 | # merge. |
|
652 | # merge. | |
653 | if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff: |
|
653 | if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff: | |
654 | # Identify which files are relevant to the merge, so we can limit the |
|
654 | # Identify which files are relevant to the merge, so we can limit the | |
655 | # total m1-vs-m2 diff to just those files. This has significant |
|
655 | # total m1-vs-m2 diff to just those files. This has significant | |
656 | # performance benefits in large repositories. |
|
656 | # performance benefits in large repositories. | |
657 | relevantfiles = set(ma.diff(m2).keys()) |
|
657 | relevantfiles = set(ma.diff(m2).keys()) | |
658 |
|
658 | |||
659 | # For copied and moved files, we need to add the source file too. |
|
659 | # For copied and moved files, we need to add the source file too. | |
660 | for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy): |
|
660 | for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy): | |
661 | if copyvalue in relevantfiles: |
|
661 | if copyvalue in relevantfiles: | |
662 | relevantfiles.add(copykey) |
|
662 | relevantfiles.add(copykey) | |
663 | for movedirkey in branch_copies1.movewithdir: |
|
663 | for movedirkey in branch_copies1.movewithdir: | |
664 | relevantfiles.add(movedirkey) |
|
664 | relevantfiles.add(movedirkey) | |
665 | filesmatcher = scmutil.matchfiles(repo, relevantfiles) |
|
665 | filesmatcher = scmutil.matchfiles(repo, relevantfiles) | |
666 | matcher = matchmod.intersectmatchers(matcher, filesmatcher) |
|
666 | matcher = matchmod.intersectmatchers(matcher, filesmatcher) | |
667 |
|
667 | |||
668 | diff = m1.diff(m2, match=matcher) |
|
668 | diff = m1.diff(m2, match=matcher) | |
669 |
|
669 | |||
670 | actions = {} |
|
670 | actions = {} | |
671 | for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff): |
|
671 | for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff): | |
672 | if n1 and n2: # file exists on both local and remote side |
|
672 | if n1 and n2: # file exists on both local and remote side | |
673 | if f not in ma: |
|
673 | if f not in ma: | |
674 | # TODO: what if they're renamed from different sources? |
|
674 | # TODO: what if they're renamed from different sources? | |
675 | fa = branch_copies1.copy.get( |
|
675 | fa = branch_copies1.copy.get( | |
676 | f, None |
|
676 | f, None | |
677 | ) or branch_copies2.copy.get(f, None) |
|
677 | ) or branch_copies2.copy.get(f, None) | |
678 | if fa is not None: |
|
678 | if fa is not None: | |
679 | actions[f] = ( |
|
679 | actions[f] = ( | |
680 | mergestatemod.ACTION_MERGE, |
|
680 | mergestatemod.ACTION_MERGE, | |
681 | (f, f, fa, False, pa.node()), |
|
681 | (f, f, fa, False, pa.node()), | |
682 | b'both renamed from %s' % fa, |
|
682 | b'both renamed from %s' % fa, | |
683 | ) |
|
683 | ) | |
684 | else: |
|
684 | else: | |
685 | actions[f] = ( |
|
685 | actions[f] = ( | |
686 | mergestatemod.ACTION_MERGE, |
|
686 | mergestatemod.ACTION_MERGE, | |
687 | (f, f, None, False, pa.node()), |
|
687 | (f, f, None, False, pa.node()), | |
688 | b'both created', |
|
688 | b'both created', | |
689 | ) |
|
689 | ) | |
690 | else: |
|
690 | else: | |
691 | a = ma[f] |
|
691 | a = ma[f] | |
692 | fla = ma.flags(f) |
|
692 | fla = ma.flags(f) | |
693 | nol = b'l' not in fl1 + fl2 + fla |
|
693 | nol = b'l' not in fl1 + fl2 + fla | |
694 | if n2 == a and fl2 == fla: |
|
694 | if n2 == a and fl2 == fla: | |
695 | actions[f] = ( |
|
695 | actions[f] = ( | |
696 | mergestatemod.ACTION_KEEP, |
|
696 | mergestatemod.ACTION_KEEP, | |
697 | (), |
|
697 | (), | |
698 | b'remote unchanged', |
|
698 | b'remote unchanged', | |
699 | ) |
|
699 | ) | |
700 | elif n1 == a and fl1 == fla: # local unchanged - use remote |
|
700 | elif n1 == a and fl1 == fla: # local unchanged - use remote | |
701 | if n1 == n2: # optimization: keep local content |
|
701 | if n1 == n2: # optimization: keep local content | |
702 | actions[f] = ( |
|
702 | actions[f] = ( | |
703 | mergestatemod.ACTION_EXEC, |
|
703 | mergestatemod.ACTION_EXEC, | |
704 | (fl2,), |
|
704 | (fl2,), | |
705 | b'update permissions', |
|
705 | b'update permissions', | |
706 | ) |
|
706 | ) | |
707 | else: |
|
707 | else: | |
708 | actions[f] = ( |
|
708 | actions[f] = ( | |
709 | mergestatemod.ACTION_GET_OTHER_AND_STORE |
|
709 | mergestatemod.ACTION_GET_OTHER_AND_STORE | |
710 | if branchmerge |
|
710 | if branchmerge | |
711 | else mergestatemod.ACTION_GET, |
|
711 | else mergestatemod.ACTION_GET, | |
712 | (fl2, False), |
|
712 | (fl2, False), | |
713 | b'remote is newer', |
|
713 | b'remote is newer', | |
714 | ) |
|
714 | ) | |
715 | if branchmerge: |
|
715 | if branchmerge: | |
716 | commitinfo[f] = b'other' |
|
716 | commitinfo[f] = b'other' | |
717 | elif nol and n2 == a: # remote only changed 'x' |
|
717 | elif nol and n2 == a: # remote only changed 'x' | |
718 | actions[f] = ( |
|
718 | actions[f] = ( | |
719 | mergestatemod.ACTION_EXEC, |
|
719 | mergestatemod.ACTION_EXEC, | |
720 | (fl2,), |
|
720 | (fl2,), | |
721 | b'update permissions', |
|
721 | b'update permissions', | |
722 | ) |
|
722 | ) | |
723 | elif nol and n1 == a: # local only changed 'x' |
|
723 | elif nol and n1 == a: # local only changed 'x' | |
724 | actions[f] = ( |
|
724 | actions[f] = ( | |
725 | mergestatemod.ACTION_GET_OTHER_AND_STORE |
|
725 | mergestatemod.ACTION_GET_OTHER_AND_STORE | |
726 | if branchmerge |
|
726 | if branchmerge | |
727 | else mergestatemod.ACTION_GET, |
|
727 | else mergestatemod.ACTION_GET, | |
728 | (fl1, False), |
|
728 | (fl1, False), | |
729 | b'remote is newer', |
|
729 | b'remote is newer', | |
730 | ) |
|
730 | ) | |
731 | if branchmerge: |
|
731 | if branchmerge: | |
732 | commitinfo[f] = b'other' |
|
732 | commitinfo[f] = b'other' | |
733 | else: # both changed something |
|
733 | else: # both changed something | |
734 | actions[f] = ( |
|
734 | actions[f] = ( | |
735 | mergestatemod.ACTION_MERGE, |
|
735 | mergestatemod.ACTION_MERGE, | |
736 | (f, f, f, False, pa.node()), |
|
736 | (f, f, f, False, pa.node()), | |
737 | b'versions differ', |
|
737 | b'versions differ', | |
738 | ) |
|
738 | ) | |
739 | elif n1: # file exists only on local side |
|
739 | elif n1: # file exists only on local side | |
740 | if f in copied2: |
|
740 | if f in copied2: | |
741 | pass # we'll deal with it on m2 side |
|
741 | pass # we'll deal with it on m2 side | |
742 | elif ( |
|
742 | elif ( | |
743 | f in branch_copies1.movewithdir |
|
743 | f in branch_copies1.movewithdir | |
744 | ): # directory rename, move local |
|
744 | ): # directory rename, move local | |
745 | f2 = branch_copies1.movewithdir[f] |
|
745 | f2 = branch_copies1.movewithdir[f] | |
746 | if f2 in m2: |
|
746 | if f2 in m2: | |
747 | actions[f2] = ( |
|
747 | actions[f2] = ( | |
748 | mergestatemod.ACTION_MERGE, |
|
748 | mergestatemod.ACTION_MERGE, | |
749 | (f, f2, None, True, pa.node()), |
|
749 | (f, f2, None, True, pa.node()), | |
750 | b'remote directory rename, both created', |
|
750 | b'remote directory rename, both created', | |
751 | ) |
|
751 | ) | |
752 | else: |
|
752 | else: | |
753 | actions[f2] = ( |
|
753 | actions[f2] = ( | |
754 | mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL, |
|
754 | mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL, | |
755 | (f, fl1), |
|
755 | (f, fl1), | |
756 | b'remote directory rename - move from %s' % f, |
|
756 | b'remote directory rename - move from %s' % f, | |
757 | ) |
|
757 | ) | |
758 | elif f in branch_copies1.copy: |
|
758 | elif f in branch_copies1.copy: | |
759 | f2 = branch_copies1.copy[f] |
|
759 | f2 = branch_copies1.copy[f] | |
760 | actions[f] = ( |
|
760 | actions[f] = ( | |
761 | mergestatemod.ACTION_MERGE, |
|
761 | mergestatemod.ACTION_MERGE, | |
762 | (f, f2, f2, False, pa.node()), |
|
762 | (f, f2, f2, False, pa.node()), | |
763 | b'local copied/moved from %s' % f2, |
|
763 | b'local copied/moved from %s' % f2, | |
764 | ) |
|
764 | ) | |
765 | elif f in ma: # clean, a different, no remote |
|
765 | elif f in ma: # clean, a different, no remote | |
766 | if n1 != ma[f]: |
|
766 | if n1 != ma[f]: | |
767 | if acceptremote: |
|
767 | if acceptremote: | |
768 | actions[f] = ( |
|
768 | actions[f] = ( | |
769 | mergestatemod.ACTION_REMOVE, |
|
769 | mergestatemod.ACTION_REMOVE, | |
770 | None, |
|
770 | None, | |
771 | b'remote delete', |
|
771 | b'remote delete', | |
772 | ) |
|
772 | ) | |
773 | else: |
|
773 | else: | |
774 | actions[f] = ( |
|
774 | actions[f] = ( | |
775 | mergestatemod.ACTION_CHANGED_DELETED, |
|
775 | mergestatemod.ACTION_CHANGED_DELETED, | |
776 | (f, None, f, False, pa.node()), |
|
776 | (f, None, f, False, pa.node()), | |
777 | b'prompt changed/deleted', |
|
777 | b'prompt changed/deleted', | |
778 | ) |
|
778 | ) | |
779 | elif n1 == addednodeid: |
|
779 | elif n1 == addednodeid: | |
780 | # This file was locally added. We should forget it instead of |
|
780 | # This file was locally added. We should forget it instead of | |
781 | # deleting it. |
|
781 | # deleting it. | |
782 | actions[f] = ( |
|
782 | actions[f] = ( | |
783 | mergestatemod.ACTION_FORGET, |
|
783 | mergestatemod.ACTION_FORGET, | |
784 | None, |
|
784 | None, | |
785 | b'remote deleted', |
|
785 | b'remote deleted', | |
786 | ) |
|
786 | ) | |
787 | else: |
|
787 | else: | |
788 | actions[f] = ( |
|
788 | actions[f] = ( | |
789 | mergestatemod.ACTION_REMOVE, |
|
789 | mergestatemod.ACTION_REMOVE, | |
790 | None, |
|
790 | None, | |
791 | b'other deleted', |
|
791 | b'other deleted', | |
792 | ) |
|
792 | ) | |
793 | elif n2: # file exists only on remote side |
|
793 | elif n2: # file exists only on remote side | |
794 | if f in copied1: |
|
794 | if f in copied1: | |
795 | pass # we'll deal with it on m1 side |
|
795 | pass # we'll deal with it on m1 side | |
796 | elif f in branch_copies2.movewithdir: |
|
796 | elif f in branch_copies2.movewithdir: | |
797 | f2 = branch_copies2.movewithdir[f] |
|
797 | f2 = branch_copies2.movewithdir[f] | |
798 | if f2 in m1: |
|
798 | if f2 in m1: | |
799 | actions[f2] = ( |
|
799 | actions[f2] = ( | |
800 | mergestatemod.ACTION_MERGE, |
|
800 | mergestatemod.ACTION_MERGE, | |
801 | (f2, f, None, False, pa.node()), |
|
801 | (f2, f, None, False, pa.node()), | |
802 | b'local directory rename, both created', |
|
802 | b'local directory rename, both created', | |
803 | ) |
|
803 | ) | |
804 | else: |
|
804 | else: | |
805 | actions[f2] = ( |
|
805 | actions[f2] = ( | |
806 | mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, |
|
806 | mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, | |
807 | (f, fl2), |
|
807 | (f, fl2), | |
808 | b'local directory rename - get from %s' % f, |
|
808 | b'local directory rename - get from %s' % f, | |
809 | ) |
|
809 | ) | |
810 | elif f in branch_copies2.copy: |
|
810 | elif f in branch_copies2.copy: | |
811 | f2 = branch_copies2.copy[f] |
|
811 | f2 = branch_copies2.copy[f] | |
812 | if f2 in m2: |
|
812 | if f2 in m2: | |
813 | actions[f] = ( |
|
813 | actions[f] = ( | |
814 | mergestatemod.ACTION_MERGE, |
|
814 | mergestatemod.ACTION_MERGE, | |
815 | (f2, f, f2, False, pa.node()), |
|
815 | (f2, f, f2, False, pa.node()), | |
816 | b'remote copied from %s' % f2, |
|
816 | b'remote copied from %s' % f2, | |
817 | ) |
|
817 | ) | |
818 | else: |
|
818 | else: | |
819 | actions[f] = ( |
|
819 | actions[f] = ( | |
820 | mergestatemod.ACTION_MERGE, |
|
820 | mergestatemod.ACTION_MERGE, | |
821 | (f2, f, f2, True, pa.node()), |
|
821 | (f2, f, f2, True, pa.node()), | |
822 | b'remote moved from %s' % f2, |
|
822 | b'remote moved from %s' % f2, | |
823 | ) |
|
823 | ) | |
824 | elif f not in ma: |
|
824 | elif f not in ma: | |
825 | # local unknown, remote created: the logic is described by the |
|
825 | # local unknown, remote created: the logic is described by the | |
826 | # following table: |
|
826 | # following table: | |
827 | # |
|
827 | # | |
828 | # force branchmerge different | action |
|
828 | # force branchmerge different | action | |
829 | # n * * | create |
|
829 | # n * * | create | |
830 | # y n * | create |
|
830 | # y n * | create | |
831 | # y y n | create |
|
831 | # y y n | create | |
832 | # y y y | merge |
|
832 | # y y y | merge | |
833 | # |
|
833 | # | |
834 | # Checking whether the files are different is expensive, so we |
|
834 | # Checking whether the files are different is expensive, so we | |
835 | # don't do that when we can avoid it. |
|
835 | # don't do that when we can avoid it. | |
836 | if not force: |
|
836 | if not force: | |
837 | actions[f] = ( |
|
837 | actions[f] = ( | |
838 | mergestatemod.ACTION_CREATED, |
|
838 | mergestatemod.ACTION_CREATED, | |
839 | (fl2,), |
|
839 | (fl2,), | |
840 | b'remote created', |
|
840 | b'remote created', | |
841 | ) |
|
841 | ) | |
842 | elif not branchmerge: |
|
842 | elif not branchmerge: | |
843 | actions[f] = ( |
|
843 | actions[f] = ( | |
844 | mergestatemod.ACTION_CREATED, |
|
844 | mergestatemod.ACTION_CREATED, | |
845 | (fl2,), |
|
845 | (fl2,), | |
846 | b'remote created', |
|
846 | b'remote created', | |
847 | ) |
|
847 | ) | |
848 | else: |
|
848 | else: | |
849 | actions[f] = ( |
|
849 | actions[f] = ( | |
850 | mergestatemod.ACTION_CREATED_MERGE, |
|
850 | mergestatemod.ACTION_CREATED_MERGE, | |
851 | (fl2, pa.node()), |
|
851 | (fl2, pa.node()), | |
852 | b'remote created, get or merge', |
|
852 | b'remote created, get or merge', | |
853 | ) |
|
853 | ) | |
854 | elif n2 != ma[f]: |
|
854 | elif n2 != ma[f]: | |
855 | df = None |
|
855 | df = None | |
856 | for d in branch_copies1.dirmove: |
|
856 | for d in branch_copies1.dirmove: | |
857 | if f.startswith(d): |
|
857 | if f.startswith(d): | |
858 | # new file added in a directory that was moved |
|
858 | # new file added in a directory that was moved | |
859 | df = branch_copies1.dirmove[d] + f[len(d) :] |
|
859 | df = branch_copies1.dirmove[d] + f[len(d) :] | |
860 | break |
|
860 | break | |
861 | if df is not None and df in m1: |
|
861 | if df is not None and df in m1: | |
862 | actions[df] = ( |
|
862 | actions[df] = ( | |
863 | mergestatemod.ACTION_MERGE, |
|
863 | mergestatemod.ACTION_MERGE, | |
864 | (df, f, f, False, pa.node()), |
|
864 | (df, f, f, False, pa.node()), | |
865 | b'local directory rename - respect move ' |
|
865 | b'local directory rename - respect move ' | |
866 | b'from %s' % f, |
|
866 | b'from %s' % f, | |
867 | ) |
|
867 | ) | |
868 | elif acceptremote: |
|
868 | elif acceptremote: | |
869 | actions[f] = ( |
|
869 | actions[f] = ( | |
870 | mergestatemod.ACTION_CREATED, |
|
870 | mergestatemod.ACTION_CREATED, | |
871 | (fl2,), |
|
871 | (fl2,), | |
872 | b'remote recreating', |
|
872 | b'remote recreating', | |
873 | ) |
|
873 | ) | |
874 | else: |
|
874 | else: | |
875 | actions[f] = ( |
|
875 | actions[f] = ( | |
876 | mergestatemod.ACTION_DELETED_CHANGED, |
|
876 | mergestatemod.ACTION_DELETED_CHANGED, | |
877 | (None, f, f, False, pa.node()), |
|
877 | (None, f, f, False, pa.node()), | |
878 | b'prompt deleted/changed', |
|
878 | b'prompt deleted/changed', | |
879 | ) |
|
879 | ) | |
880 |
|
880 | |||
881 | if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'): |
|
881 | if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'): | |
882 | # If we are merging, look for path conflicts. |
|
882 | # If we are merging, look for path conflicts. | |
883 | checkpathconflicts(repo, wctx, p2, actions) |
|
883 | checkpathconflicts(repo, wctx, p2, actions) | |
884 |
|
884 | |||
885 | narrowmatch = repo.narrowmatch() |
|
885 | narrowmatch = repo.narrowmatch() | |
886 | if not narrowmatch.always(): |
|
886 | if not narrowmatch.always(): | |
887 | # Updates "actions" in place |
|
887 | # Updates "actions" in place | |
888 | _filternarrowactions(narrowmatch, branchmerge, actions) |
|
888 | _filternarrowactions(narrowmatch, branchmerge, actions) | |
889 |
|
889 | |||
890 | renamedelete = branch_copies1.renamedelete |
|
890 | renamedelete = branch_copies1.renamedelete | |
891 | renamedelete.update(branch_copies2.renamedelete) |
|
891 | renamedelete.update(branch_copies2.renamedelete) | |
892 |
|
892 | |||
893 | return mergeresult(actions, diverge, renamedelete, commitinfo) |
|
893 | return mergeresult(actions, diverge, renamedelete, commitinfo) | |
894 |
|
894 | |||
895 |
|
895 | |||
896 | def _resolvetrivial(repo, wctx, mctx, ancestor, actions): |
|
896 | def _resolvetrivial(repo, wctx, mctx, ancestor, actions): | |
897 | """Resolves false conflicts where the nodeid changed but the content |
|
897 | """Resolves false conflicts where the nodeid changed but the content | |
898 | remained the same.""" |
|
898 | remained the same.""" | |
899 | # We force a copy of actions.items() because we're going to mutate |
|
899 | # We force a copy of actions.items() because we're going to mutate | |
900 | # actions as we resolve trivial conflicts. |
|
900 | # actions as we resolve trivial conflicts. | |
901 | for f, (m, args, msg) in list(actions.items()): |
|
901 | for f, (m, args, msg) in list(actions.items()): | |
902 | if ( |
|
902 | if ( | |
903 | m == mergestatemod.ACTION_CHANGED_DELETED |
|
903 | m == mergestatemod.ACTION_CHANGED_DELETED | |
904 | and f in ancestor |
|
904 | and f in ancestor | |
905 | and not wctx[f].cmp(ancestor[f]) |
|
905 | and not wctx[f].cmp(ancestor[f]) | |
906 | ): |
|
906 | ): | |
907 | # local did change but ended up with same content |
|
907 | # local did change but ended up with same content | |
908 | actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same' |
|
908 | actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same' | |
909 | elif ( |
|
909 | elif ( | |
910 | m == mergestatemod.ACTION_DELETED_CHANGED |
|
910 | m == mergestatemod.ACTION_DELETED_CHANGED | |
911 | and f in ancestor |
|
911 | and f in ancestor | |
912 | and not mctx[f].cmp(ancestor[f]) |
|
912 | and not mctx[f].cmp(ancestor[f]) | |
913 | ): |
|
913 | ): | |
914 | # remote did change but ended up with same content |
|
914 | # remote did change but ended up with same content | |
915 | del actions[f] # don't get = keep local deleted |
|
915 | del actions[f] # don't get = keep local deleted | |
916 |
|
916 | |||
917 |
|
917 | |||
918 | def calculateupdates( |
|
918 | def calculateupdates( | |
919 | repo, |
|
919 | repo, | |
920 | wctx, |
|
920 | wctx, | |
921 | mctx, |
|
921 | mctx, | |
922 | ancestors, |
|
922 | ancestors, | |
923 | branchmerge, |
|
923 | branchmerge, | |
924 | force, |
|
924 | force, | |
925 | acceptremote, |
|
925 | acceptremote, | |
926 | followcopies, |
|
926 | followcopies, | |
927 | matcher=None, |
|
927 | matcher=None, | |
928 | mergeforce=False, |
|
928 | mergeforce=False, | |
929 | ): |
|
929 | ): | |
930 | """ |
|
930 | """ | |
931 | Calculate the actions needed to merge mctx into wctx using ancestors |
|
931 | Calculate the actions needed to merge mctx into wctx using ancestors | |
932 |
|
932 | |||
933 | Uses manifestmerge() to merge manifest and get list of actions required to |
|
933 | Uses manifestmerge() to merge manifest and get list of actions required to | |
934 | perform for merging two manifests. If there are multiple ancestors, uses bid |
|
934 | perform for merging two manifests. If there are multiple ancestors, uses bid | |
935 | merge if enabled. |
|
935 | merge if enabled. | |
936 |
|
936 | |||
937 | Also filters out actions which are not required if the repository is sparse. |
|
937 | Also filters out actions which are not required if the repository is sparse. | |
938 |
|
938 |||
939 | Returns a mergeresult object, the same as manifestmerge() does. |
|
939 | Returns a mergeresult object, the same as manifestmerge() does. | |
940 | """ |
|
940 | """ | |
941 | # Avoid cycle. |
|
941 | # Avoid cycle. | |
942 | from . import sparse |
|
942 | from . import sparse | |
943 |
|
943 | |||
944 | if len(ancestors) == 1: # default |
|
944 | if len(ancestors) == 1: # default | |
945 | mresult = manifestmerge( |
|
945 | mresult = manifestmerge( | |
946 | repo, |
|
946 | repo, | |
947 | wctx, |
|
947 | wctx, | |
948 | mctx, |
|
948 | mctx, | |
949 | ancestors[0], |
|
949 | ancestors[0], | |
950 | branchmerge, |
|
950 | branchmerge, | |
951 | force, |
|
951 | force, | |
952 | matcher, |
|
952 | matcher, | |
953 | acceptremote, |
|
953 | acceptremote, | |
954 | followcopies, |
|
954 | followcopies, | |
955 | ) |
|
955 | ) | |
956 | _checkunknownfiles(repo, wctx, mctx, force, mresult.actions, mergeforce) |
|
956 | _checkunknownfiles(repo, wctx, mctx, force, mresult.actions, mergeforce) | |
957 |
|
957 | |||
958 | else: # only when merge.preferancestor=* - the default |
|
958 | else: # only when merge.preferancestor=* - the default | |
959 | repo.ui.note( |
|
959 | repo.ui.note( | |
960 | _(b"note: merging %s and %s using bids from ancestors %s\n") |
|
960 | _(b"note: merging %s and %s using bids from ancestors %s\n") | |
961 | % ( |
|
961 | % ( | |
962 | wctx, |
|
962 | wctx, | |
963 | mctx, |
|
963 | mctx, | |
964 | _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors), |
|
964 | _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors), | |
965 | ) |
|
965 | ) | |
966 | ) |
|
966 | ) | |
967 |
|
967 | |||
968 | # Call for bids |
|
968 | # Call for bids | |
969 | fbids = ( |
|
969 | fbids = ( | |
970 | {} |
|
970 | {} | |
971 | ) # mapping filename to bids (action method to list of actions) |
|
971 | ) # mapping filename to bids (action method to list of actions) | |
972 | diverge, renamedelete = None, None |
|
972 | diverge, renamedelete = None, None | |
973 | for ancestor in ancestors: |
|
973 | for ancestor in ancestors: | |
974 | repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor) |
|
974 | repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor) | |
975 | mresult1 = manifestmerge( |
|
975 | mresult1 = manifestmerge( | |
976 | repo, |
|
976 | repo, | |
977 | wctx, |
|
977 | wctx, | |
978 | mctx, |
|
978 | mctx, | |
979 | ancestor, |
|
979 | ancestor, | |
980 | branchmerge, |
|
980 | branchmerge, | |
981 | force, |
|
981 | force, | |
982 | matcher, |
|
982 | matcher, | |
983 | acceptremote, |
|
983 | acceptremote, | |
984 | followcopies, |
|
984 | followcopies, | |
985 | forcefulldiff=True, |
|
985 | forcefulldiff=True, | |
986 | ) |
|
986 | ) | |
987 | _checkunknownfiles( |
|
987 | _checkunknownfiles( | |
988 | repo, wctx, mctx, force, mresult1.actions, mergeforce |
|
988 | repo, wctx, mctx, force, mresult1.actions, mergeforce | |
989 | ) |
|
989 | ) | |
990 |
|
990 | |||
991 | # Track the shortest set of warnings on the theory that bid |
|
991 | # Track the shortest set of warnings on the theory that bid | |
992 | # merge will correctly incorporate more information |
|
992 | # merge will correctly incorporate more information | |
993 | if diverge is None or len(mresult1.diverge) < len(diverge): |
|
993 | if diverge is None or len(mresult1.diverge) < len(diverge): | |
994 | diverge = mresult1.diverge |
|
994 | diverge = mresult1.diverge | |
995 | if renamedelete is None or len(renamedelete) < len( |
|
995 | if renamedelete is None or len(renamedelete) < len( | |
996 | mresult1.renamedelete |
|
996 | mresult1.renamedelete | |
997 | ): |
|
997 | ): | |
998 | renamedelete = mresult1.renamedelete |
|
998 | renamedelete = mresult1.renamedelete | |
999 |
|
999 | |||
1000 | for f, a in sorted(pycompat.iteritems(mresult1.actions)): |
|
1000 | for f, a in sorted(pycompat.iteritems(mresult1.actions)): | |
1001 | m, args, msg = a |
|
1001 | m, args, msg = a | |
1002 | if m == mergestatemod.ACTION_GET_OTHER_AND_STORE: |
|
1002 | if m == mergestatemod.ACTION_GET_OTHER_AND_STORE: | |
1003 | m = mergestatemod.ACTION_GET |
|
1003 | m = mergestatemod.ACTION_GET | |
1004 | repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m)) |
|
1004 | repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m)) | |
1005 | if f in fbids: |
|
1005 | if f in fbids: | |
1006 | d = fbids[f] |
|
1006 | d = fbids[f] | |
1007 | if m in d: |
|
1007 | if m in d: | |
1008 | d[m].append(a) |
|
1008 | d[m].append(a) | |
1009 | else: |
|
1009 | else: | |
1010 | d[m] = [a] |
|
1010 | d[m] = [a] | |
1011 | else: |
|
1011 | else: | |
1012 | fbids[f] = {m: [a]} |
|
1012 | fbids[f] = {m: [a]} | |
1013 |
|
1013 | |||
1014 | # Pick the best bid for each file |
|
1014 | # Pick the best bid for each file | |
1015 | repo.ui.note(_(b'\nauction for merging merge bids\n')) |
|
1015 | repo.ui.note(_(b'\nauction for merging merge bids\n')) | |
1016 | actions = {} |
|
1016 | actions = {} | |
1017 | for f, bids in sorted(fbids.items()): |
|
1017 | for f, bids in sorted(fbids.items()): | |
1018 | # bids is a mapping from action method to list of actions |
|
1018 | # bids is a mapping from action method to list of actions | |
1019 | # Consensus? |
|
1019 | # Consensus? | |
1020 | if len(bids) == 1: # all bids are the same kind of method |
|
1020 | if len(bids) == 1: # all bids are the same kind of method | |
1021 | m, l = list(bids.items())[0] |
|
1021 | m, l = list(bids.items())[0] | |
1022 | if all(a == l[0] for a in l[1:]): # len(bids) is > 1 |
|
1022 | if all(a == l[0] for a in l[1:]): # len(bids) is > 1 | |
1023 | repo.ui.note(_(b" %s: consensus for %s\n") % (f, m)) |
|
1023 | repo.ui.note(_(b" %s: consensus for %s\n") % (f, m)) | |
1024 | actions[f] = l[0] |
|
1024 | actions[f] = l[0] | |
1025 | continue |
|
1025 | continue | |
1026 | # If keep is an option, just do it. |
|
1026 | # If keep is an option, just do it. | |
1027 | if mergestatemod.ACTION_KEEP in bids: |
|
1027 | if mergestatemod.ACTION_KEEP in bids: | |
1028 | repo.ui.note(_(b" %s: picking 'keep' action\n") % f) |
|
1028 | repo.ui.note(_(b" %s: picking 'keep' action\n") % f) | |
1029 | actions[f] = bids[mergestatemod.ACTION_KEEP][0] |
|
1029 | actions[f] = bids[mergestatemod.ACTION_KEEP][0] | |
1030 | continue |
|
1030 | continue | |
1031 | # If there are gets and they all agree [how could they not?], do it. |
|
1031 | # If there are gets and they all agree [how could they not?], do it. | |
1032 | if mergestatemod.ACTION_GET in bids: |
|
1032 | if mergestatemod.ACTION_GET in bids: | |
1033 | ga0 = bids[mergestatemod.ACTION_GET][0] |
|
1033 | ga0 = bids[mergestatemod.ACTION_GET][0] | |
1034 | if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]): |
|
1034 | if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]): | |
1035 | repo.ui.note(_(b" %s: picking 'get' action\n") % f) |
|
1035 | repo.ui.note(_(b" %s: picking 'get' action\n") % f) | |
1036 | actions[f] = ga0 |
|
1036 | actions[f] = ga0 | |
1037 | continue |
|
1037 | continue | |
1038 | # TODO: Consider other simple actions such as mode changes |
|
1038 | # TODO: Consider other simple actions such as mode changes | |
1039 | # Handle inefficient democrazy. |
|
1039 | # Handle inefficient democrazy. | |
1040 | repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f) |
|
1040 | repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f) | |
1041 | for m, l in sorted(bids.items()): |
|
1041 | for m, l in sorted(bids.items()): | |
1042 | for _f, args, msg in l: |
|
1042 | for _f, args, msg in l: | |
1043 | repo.ui.note(b' %s -> %s\n' % (msg, m)) |
|
1043 | repo.ui.note(b' %s -> %s\n' % (msg, m)) | |
1044 | # Pick random action. TODO: Instead, prompt user when resolving |
|
1044 | # Pick random action. TODO: Instead, prompt user when resolving | |
1045 | m, l = list(bids.items())[0] |
|
1045 | m, l = list(bids.items())[0] | |
1046 | repo.ui.warn( |
|
1046 | repo.ui.warn( | |
1047 | _(b' %s: ambiguous merge - picked %s action\n') % (f, m) |
|
1047 | _(b' %s: ambiguous merge - picked %s action\n') % (f, m) | |
1048 | ) |
|
1048 | ) | |
1049 | actions[f] = l[0] |
|
1049 | actions[f] = l[0] | |
1050 | continue |
|
1050 | continue | |
1051 | repo.ui.note(_(b'end of auction\n\n')) |
|
1051 | repo.ui.note(_(b'end of auction\n\n')) | |
1052 | # TODO: think about commitinfo when bid merge is used |
|
1052 | # TODO: think about commitinfo when bid merge is used | |
1053 | mresult = mergeresult(actions, diverge, renamedelete, {}) |
|
1053 | mresult = mergeresult(actions, diverge, renamedelete, {}) | |
1054 |
|
1054 | |||
1055 | if wctx.rev() is None: |
|
1055 | if wctx.rev() is None: | |
1056 | fractions = _forgetremoved(wctx, mctx, branchmerge) |
|
1056 | fractions = _forgetremoved(wctx, mctx, branchmerge) | |
1057 | mresult.actions.update(fractions) |
|
1057 | mresult.actions.update(fractions) | |
1058 |
|
1058 | |||
1059 | prunedactions = sparse.filterupdatesactions( |
|
1059 | prunedactions = sparse.filterupdatesactions( | |
1060 | repo, wctx, mctx, branchmerge, mresult.actions |
|
1060 | repo, wctx, mctx, branchmerge, mresult.actions | |
1061 | ) |
|
1061 | ) | |
1062 | _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult.actions) |
|
1062 | _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult.actions) | |
1063 |
|
1063 | |||
1064 | mresult.setactions(prunedactions) |
|
1064 | mresult.setactions(prunedactions) | |
1065 | return mresult |
|
1065 | return mresult | |
1066 |
|
1066 | |||
1067 |
|
1067 | |||
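The auction above groups bids per file and then by action before deciding. A repository-free sketch of that grouping and the consensus/keep/fallback order, using hypothetical files and single-letter action codes as stand-ins for the mergestatemod constants:

    # fbids maps file -> action -> list of (file, args, msg) bids, one bid
    # per ancestor that proposed something for the file.
    fbids = {
        'a.txt': {'g': [('a.txt', ('', False), 'remote created')]},
        'b.txt': {
            'g': [('b.txt', ('', False), 'remote is newer')],
            'k': [('b.txt', None, 'keep local version')],
        },
    }

    actions = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:                     # consensus: only one kind of action
            actions[f] = list(bids.values())[0][0]
        elif 'k' in bids:                      # keeping the local file is always safe
            actions[f] = bids['k'][0]
        else:                                  # ambiguous: pick one arbitrarily
            actions[f] = list(bids.values())[0][0]

    print(actions['a.txt'])   # ('a.txt', ('', False), 'remote created')
    print(actions['b.txt'])   # ('b.txt', None, 'keep local version')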
1068 | def _getcwd(): |
|
1068 | def _getcwd(): | |
1069 | try: |
|
1069 | try: | |
1070 | return encoding.getcwd() |
|
1070 | return encoding.getcwd() | |
1071 | except OSError as err: |
|
1071 | except OSError as err: | |
1072 | if err.errno == errno.ENOENT: |
|
1072 | if err.errno == errno.ENOENT: | |
1073 | return None |
|
1073 | return None | |
1074 | raise |
|
1074 | raise | |
1075 |
|
1075 | |||
1076 |
|
1076 | |||
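_getcwd above simply maps a vanished working directory to None. The same pattern with only the standard library, as a runnable sketch (the helper name is made up for illustration):

    import errno
    import os

    def getcwd_or_none():
        # A deleted current directory surfaces as ENOENT; report it as None
        # instead of crashing, but let every other OSError propagate.
        try:
            return os.getcwd()
        except OSError as err:
            if err.errno == errno.ENOENT:
                return None
            raise

    print(getcwd_or_none() is not None)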
1077 | def batchremove(repo, wctx, actions): |
|
1077 | def batchremove(repo, wctx, actions): | |
1078 | """apply removes to the working directory |
|
1078 | """apply removes to the working directory | |
1079 |
|
1079 | |||
1080 | yields tuples for progress updates |
|
1080 | yields tuples for progress updates | |
1081 | """ |
|
1081 | """ | |
1082 | verbose = repo.ui.verbose |
|
1082 | verbose = repo.ui.verbose | |
1083 | cwd = _getcwd() |
|
1083 | cwd = _getcwd() | |
1084 | i = 0 |
|
1084 | i = 0 | |
1085 | for f, args, msg in actions: |
|
1085 | for f, args, msg in actions: | |
1086 | repo.ui.debug(b" %s: %s -> r\n" % (f, msg)) |
|
1086 | repo.ui.debug(b" %s: %s -> r\n" % (f, msg)) | |
1087 | if verbose: |
|
1087 | if verbose: | |
1088 | repo.ui.note(_(b"removing %s\n") % f) |
|
1088 | repo.ui.note(_(b"removing %s\n") % f) | |
1089 | wctx[f].audit() |
|
1089 | wctx[f].audit() | |
1090 | try: |
|
1090 | try: | |
1091 | wctx[f].remove(ignoremissing=True) |
|
1091 | wctx[f].remove(ignoremissing=True) | |
1092 | except OSError as inst: |
|
1092 | except OSError as inst: | |
1093 | repo.ui.warn( |
|
1093 | repo.ui.warn( | |
1094 | _(b"update failed to remove %s: %s!\n") % (f, inst.strerror) |
|
1094 | _(b"update failed to remove %s: %s!\n") % (f, inst.strerror) | |
1095 | ) |
|
1095 | ) | |
1096 | if i == 100: |
|
1096 | if i == 100: | |
1097 | yield i, f |
|
1097 | yield i, f | |
1098 | i = 0 |
|
1098 | i = 0 | |
1099 | i += 1 |
|
1099 | i += 1 | |
1100 | if i > 0: |
|
1100 | if i > 0: | |
1101 | yield i, f |
|
1101 | yield i, f | |
1102 |
|
1102 | |||
1103 | if cwd and not _getcwd(): |
|
1103 | if cwd and not _getcwd(): | |
1104 | # cwd was removed in the course of removing files; print a helpful |
|
1104 | # cwd was removed in the course of removing files; print a helpful | |
1105 | # warning. |
|
1105 | # warning. | |
1106 | repo.ui.warn( |
|
1106 | repo.ui.warn( | |
1107 | _( |
|
1107 | _( | |
1108 | b"current directory was removed\n" |
|
1108 | b"current directory was removed\n" | |
1109 | b"(consider changing to repo root: %s)\n" |
|
1109 | b"(consider changing to repo root: %s)\n" | |
1110 | ) |
|
1110 | ) | |
1111 | % repo.root |
|
1111 | % repo.root | |
1112 | ) |
|
1112 | ) | |
1113 |
|
1113 | |||
1114 |
|
1114 | |||
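batchremove reports progress in chunks rather than per file: a (count, last_file) tuple roughly every hundred removals and once more for the remainder. A standalone sketch of that batching protocol on toy data, no repository involved:

    def batched_progress(items, batchsize=100):
        # Yield (count, last_item) once per full batch and once for the tail,
        # mirroring the i == 100 / trailing i > 0 logic in batchremove().
        i = 0
        item = None
        for item in items:
            i += 1
            if i == batchsize:
                yield i, item
                i = 0
        if i > 0:
            yield i, item

    total = 0
    for step, last in batched_progress(['file%d' % n for n in range(250)]):
        total += step
        print('removed %d more files (last was %s)' % (step, last))
    assert total == 250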
1115 | def batchget(repo, mctx, wctx, wantfiledata, actions): |
|
1115 | def batchget(repo, mctx, wctx, wantfiledata, actions): | |
1116 | """apply gets to the working directory |
|
1116 | """apply gets to the working directory | |
1117 |
|
1117 | |||
1118 | mctx is the context to get from |
|
1118 | mctx is the context to get from | |
1119 |
|
1119 | |||
1120 | Yields arbitrarily many (False, tuple) for progress updates, followed by |
|
1120 | Yields arbitrarily many (False, tuple) for progress updates, followed by | |
1121 | exactly one (True, filedata). When wantfiledata is false, filedata is an |
|
1121 | exactly one (True, filedata). When wantfiledata is false, filedata is an | |
1122 | empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size, |
|
1122 | empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size, | |
1123 | mtime) of the file f written for each action. |
|
1123 | mtime) of the file f written for each action. | |
1124 | """ |
|
1124 | """ | |
1125 | filedata = {} |
|
1125 | filedata = {} | |
1126 | verbose = repo.ui.verbose |
|
1126 | verbose = repo.ui.verbose | |
1127 | fctx = mctx.filectx |
|
1127 | fctx = mctx.filectx | |
1128 | ui = repo.ui |
|
1128 | ui = repo.ui | |
1129 | i = 0 |
|
1129 | i = 0 | |
1130 | with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)): |
|
1130 | with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)): | |
1131 | for f, (flags, backup), msg in actions: |
|
1131 | for f, (flags, backup), msg in actions: | |
1132 | repo.ui.debug(b" %s: %s -> g\n" % (f, msg)) |
|
1132 | repo.ui.debug(b" %s: %s -> g\n" % (f, msg)) | |
1133 | if verbose: |
|
1133 | if verbose: | |
1134 | repo.ui.note(_(b"getting %s\n") % f) |
|
1134 | repo.ui.note(_(b"getting %s\n") % f) | |
1135 |
|
1135 | |||
1136 | if backup: |
|
1136 | if backup: | |
1137 | # If a file or directory exists with the same name, back that |
|
1137 | # If a file or directory exists with the same name, back that | |
1138 | # up. Otherwise, look to see if there is a file that conflicts |
|
1138 | # up. Otherwise, look to see if there is a file that conflicts | |
1139 | # with a directory this file is in, and if so, back that up. |
|
1139 | # with a directory this file is in, and if so, back that up. | |
1140 | conflicting = f |
|
1140 | conflicting = f | |
1141 | if not repo.wvfs.lexists(f): |
|
1141 | if not repo.wvfs.lexists(f): | |
1142 | for p in pathutil.finddirs(f): |
|
1142 | for p in pathutil.finddirs(f): | |
1143 | if repo.wvfs.isfileorlink(p): |
|
1143 | if repo.wvfs.isfileorlink(p): | |
1144 | conflicting = p |
|
1144 | conflicting = p | |
1145 | break |
|
1145 | break | |
1146 | if repo.wvfs.lexists(conflicting): |
|
1146 | if repo.wvfs.lexists(conflicting): | |
1147 | orig = scmutil.backuppath(ui, repo, conflicting) |
|
1147 | orig = scmutil.backuppath(ui, repo, conflicting) | |
1148 | util.rename(repo.wjoin(conflicting), orig) |
|
1148 | util.rename(repo.wjoin(conflicting), orig) | |
1149 | wfctx = wctx[f] |
|
1149 | wfctx = wctx[f] | |
1150 | wfctx.clearunknown() |
|
1150 | wfctx.clearunknown() | |
1151 | atomictemp = ui.configbool(b"experimental", b"update.atomic-file") |
|
1151 | atomictemp = ui.configbool(b"experimental", b"update.atomic-file") | |
1152 | size = wfctx.write( |
|
1152 | size = wfctx.write( | |
1153 | fctx(f).data(), |
|
1153 | fctx(f).data(), | |
1154 | flags, |
|
1154 | flags, | |
1155 | backgroundclose=True, |
|
1155 | backgroundclose=True, | |
1156 | atomictemp=atomictemp, |
|
1156 | atomictemp=atomictemp, | |
1157 | ) |
|
1157 | ) | |
1158 | if wantfiledata: |
|
1158 | if wantfiledata: | |
1159 | s = wfctx.lstat() |
|
1159 | s = wfctx.lstat() | |
1160 | mode = s.st_mode |
|
1160 | mode = s.st_mode | |
1161 | mtime = s[stat.ST_MTIME] |
|
1161 | mtime = s[stat.ST_MTIME] | |
1162 | filedata[f] = (mode, size, mtime) # for dirstate.normal |
|
1162 | filedata[f] = (mode, size, mtime) # for dirstate.normal | |
1163 | if i == 100: |
|
1163 | if i == 100: | |
1164 | yield False, (i, f) |
|
1164 | yield False, (i, f) | |
1165 | i = 0 |
|
1165 | i = 0 | |
1166 | i += 1 |
|
1166 | i += 1 | |
1167 | if i > 0: |
|
1167 | if i > 0: | |
1168 | yield False, (i, f) |
|
1168 | yield False, (i, f) | |
1169 | yield True, filedata |
|
1169 | yield True, filedata | |
1170 |
|
1170 | |||
1171 |
|
1171 | |||
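Because batchget must hand back the collected file data as well as progress, it follows a two-phase protocol (this is what the hasretval=True worker call later in this file expects): any number of (False, progress) tuples, then exactly one (True, retval). A toy, repository-free version of producing and consuming it:

    def toy_batchget(files):
        filedata = {}
        for f in files:
            filedata[f] = (0o100644, 0, 0.0)   # placeholder (mode, size, mtime)
            yield False, (1, f)                # progress: one more file written
        yield True, filedata                   # final value, emitted exactly once

    getfiledata = {}
    for final, res in toy_batchget(['a.txt', 'b.txt']):
        if final:
            getfiledata = res
        else:
            step, name = res
            print('got %s' % name)
    print(sorted(getfiledata))                 # ['a.txt', 'b.txt']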
1172 | def _prefetchfiles(repo, ctx, actions): |
|
1172 | def _prefetchfiles(repo, ctx, actions): | |
1173 | """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict |
|
1173 | """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict | |
1174 | of merge actions. ``ctx`` is the context being merged in.""" |
|
1174 | of merge actions. ``ctx`` is the context being merged in.""" | |
1175 |
|
1175 | |||
1176 | # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they |
|
1176 | # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they | |
1177 | # don't touch the context to be merged in. 'cd' is skipped, because |
|
1177 | # don't touch the context to be merged in. 'cd' is skipped, because | |
1178 | # changed/deleted never resolves to something from the remote side. |
|
1178 | # changed/deleted never resolves to something from the remote side. | |
1179 | oplist = [ |
|
1179 | oplist = [ | |
1180 | actions[a] |
|
1180 | actions[a] | |
1181 | for a in ( |
|
1181 | for a in ( | |
1182 | mergestatemod.ACTION_GET, |
|
1182 | mergestatemod.ACTION_GET, | |
1183 | mergestatemod.ACTION_DELETED_CHANGED, |
|
1183 | mergestatemod.ACTION_DELETED_CHANGED, | |
1184 | mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, |
|
1184 | mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, | |
1185 | mergestatemod.ACTION_MERGE, |
|
1185 | mergestatemod.ACTION_MERGE, | |
1186 | ) |
|
1186 | ) | |
1187 | ] |
|
1187 | ] | |
1188 | prefetch = scmutil.prefetchfiles |
|
1188 | prefetch = scmutil.prefetchfiles | |
1189 | matchfiles = scmutil.matchfiles |
|
1189 | matchfiles = scmutil.matchfiles | |
1190 | prefetch( |
|
1190 | prefetch( | |
1191 | repo, |
|
1191 | repo, | |
1192 | [ |
|
1192 | [ | |
1193 | ( |
|
1193 | ( | |
1194 | ctx.rev(), |
|
1194 | ctx.rev(), | |
1195 | matchfiles( |
|
1195 | matchfiles( | |
1196 | repo, [f for sublist in oplist for f, args, msg in sublist] |
|
1196 | repo, [f for sublist in oplist for f, args, msg in sublist] | |
1197 | ), |
|
1197 | ), | |
1198 | ) |
|
1198 | ) | |
1199 | ], |
|
1199 | ], | |
1200 | ) |
|
1200 | ) | |
1201 |
|
1201 | |||
1202 |
|
1202 | |||
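The nested comprehension in _prefetchfiles flattens several per-action lists into one list of file names in a single pass; the same idiom on toy data:

    # oplist is a list of action lists; each entry is a (file, args, msg) tuple.
    oplist = [
        [('a.txt', (), 'remote created'), ('b.txt', (), 'remote is newer')],
        [('c.txt', (), 'versions differ')],
    ]
    files = [f for sublist in oplist for f, args, msg in sublist]
    assert files == ['a.txt', 'b.txt', 'c.txt']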
1203 | @attr.s(frozen=True) |
|
1203 | @attr.s(frozen=True) | |
1204 | class updateresult(object): |
|
1204 | class updateresult(object): | |
1205 | updatedcount = attr.ib() |
|
1205 | updatedcount = attr.ib() | |
1206 | mergedcount = attr.ib() |
|
1206 | mergedcount = attr.ib() | |
1207 | removedcount = attr.ib() |
|
1207 | removedcount = attr.ib() | |
1208 | unresolvedcount = attr.ib() |
|
1208 | unresolvedcount = attr.ib() | |
1209 |
|
1209 | |||
1210 | def isempty(self): |
|
1210 | def isempty(self): | |
1211 | return not ( |
|
1211 | return not ( | |
1212 | self.updatedcount |
|
1212 | self.updatedcount | |
1213 | or self.mergedcount |
|
1213 | or self.mergedcount | |
1214 | or self.removedcount |
|
1214 | or self.removedcount | |
1215 | or self.unresolvedcount |
|
1215 | or self.unresolvedcount | |
1216 | ) |
|
1216 | ) | |
1217 |
|
1217 | |||
1218 |
|
1218 | |||
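The four counters carried by updateresult are what callers turn into the familiar post-update summary line. A hedged, standalone sketch of doing that with a namedtuple stand-in (the exact wording Mercurial prints may differ):

    from collections import namedtuple

    ToyResult = namedtuple(
        'ToyResult', 'updatedcount mergedcount removedcount unresolvedcount'
    )

    def summarize(stats):
        return (
            '%d files updated, %d files merged, '
            '%d files removed, %d files unresolved'
            % (stats.updatedcount, stats.mergedcount,
               stats.removedcount, stats.unresolvedcount)
        )

    print(summarize(ToyResult(3, 1, 0, 0)))
    print(not any(ToyResult(0, 0, 0, 0)))   # True: mirrors updateresult.isempty()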
1219 | def emptyactions(): |
|
1219 | def emptyactions(): | |
1220 | """create an actions dict, to be populated and passed to applyupdates()""" |
|
1220 | """create an actions dict, to be populated and passed to applyupdates()""" | |
1221 | return { |
|
1221 | return { | |
1222 | m: [] |
|
1222 | m: [] | |
1223 | for m in ( |
|
1223 | for m in ( | |
1224 | mergestatemod.ACTION_ADD, |
|
1224 | mergestatemod.ACTION_ADD, | |
1225 | mergestatemod.ACTION_ADD_MODIFIED, |
|
1225 | mergestatemod.ACTION_ADD_MODIFIED, | |
1226 | mergestatemod.ACTION_FORGET, |
|
1226 | mergestatemod.ACTION_FORGET, | |
1227 | mergestatemod.ACTION_GET, |
|
1227 | mergestatemod.ACTION_GET, | |
1228 | mergestatemod.ACTION_CHANGED_DELETED, |
|
1228 | mergestatemod.ACTION_CHANGED_DELETED, | |
1229 | mergestatemod.ACTION_DELETED_CHANGED, |
|
1229 | mergestatemod.ACTION_DELETED_CHANGED, | |
1230 | mergestatemod.ACTION_REMOVE, |
|
1230 | mergestatemod.ACTION_REMOVE, | |
1231 | mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL, |
|
1231 | mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL, | |
1232 | mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, |
|
1232 | mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, | |
1233 | mergestatemod.ACTION_MERGE, |
|
1233 | mergestatemod.ACTION_MERGE, | |
1234 | mergestatemod.ACTION_EXEC, |
|
1234 | mergestatemod.ACTION_EXEC, | |
1235 | mergestatemod.ACTION_KEEP, |
|
1235 | mergestatemod.ACTION_KEEP, | |
1236 | mergestatemod.ACTION_PATH_CONFLICT, |
|
1236 | mergestatemod.ACTION_PATH_CONFLICT, | |
1237 | mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, |
|
1237 | mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, | |
1238 | mergestatemod.ACTION_GET_OTHER_AND_STORE, |
|
1238 | mergestatemod.ACTION_GET_OTHER_AND_STORE, | |
1239 | ) |
|
1239 | ) | |
1240 | } |
|
1240 | } | |
1241 |
|
1241 | |||
1242 |
|
1242 | |||
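emptyactions() only pre-creates one empty list per action constant; callers then append (filename, args, message) tuples before passing the dict to applyupdates(). A sketch of that shape, with byte-string stand-ins for the mergestatemod constants and illustrative args layouts:

    ACTION_GET = b'g'        # stand-ins for mergestatemod.ACTION_GET / ACTION_REMOVE
    ACTION_REMOVE = b'r'

    actions = {m: [] for m in (ACTION_GET, ACTION_REMOVE)}

    # For a 'get', batchget() unpacks args as (flags, backup).
    actions[ACTION_GET].append((b'foo.txt', (b'', False), b'remote created'))
    # For a 'remove', batchremove() never looks at args.
    actions[ACTION_REMOVE].append((b'bar.txt', None, b'other deleted'))

    for action, entries in sorted(actions.items()):
        for f, args, msg in entries:
            print('%s: %s -> %s' % (f.decode(), msg.decode(), action.decode()))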
1243 | def applyupdates( |
|
1243 | def applyupdates( | |
1244 | repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None |
|
1244 | repo, | |
|
1245 | actions, | |||
|
1246 | wctx, | |||
|
1247 | mctx, | |||
|
1248 | overwrite, | |||
|
1249 | wantfiledata, | |||
|
1250 | labels=None, | |||
|
1251 | commitinfo=None, | |||
1245 | ): |
|
1252 | ): | |
1246 | """apply the merge action list to the working directory |
|
1253 | """apply the merge action list to the working directory | |
1247 |
|
1254 | |||
1248 | wctx is the working copy context |
|
1255 | wctx is the working copy context | |
1249 | mctx is the context to be merged into the working copy |
|
1256 | mctx is the context to be merged into the working copy | |
|
1257 | commitinfo is a mapping of information which needs to be stored somewhere | |||
|
1258 | (probably mergestate) so that it can be used at commit time. | |||
1250 |
|
1259 | |||
1251 | Return a tuple of (counts, filedata), where counts is a tuple |
|
1260 | Return a tuple of (counts, filedata), where counts is a tuple | |
1252 | (updated, merged, removed, unresolved) that describes how many |
|
1261 | (updated, merged, removed, unresolved) that describes how many | |
1253 | files were affected by the update, and filedata is as described in |
|
1262 | files were affected by the update, and filedata is as described in | |
1254 | batchget. |
|
1263 | batchget. | |
1255 | """ |
|
1264 | """ | |
1256 |
|
1265 | |||
1257 | _prefetchfiles(repo, mctx, actions) |
|
1266 | _prefetchfiles(repo, mctx, actions) | |
1258 |
|
1267 | |||
1259 | updated, merged, removed = 0, 0, 0 |
|
1268 | updated, merged, removed = 0, 0, 0 | |
1260 | ms = mergestatemod.mergestate.clean( |
|
1269 | ms = mergestatemod.mergestate.clean( | |
1261 | repo, wctx.p1().node(), mctx.node(), labels |
|
1270 | repo, wctx.p1().node(), mctx.node(), labels | |
1262 | ) |
|
1271 | ) | |
1263 |
|
1272 | |||
|
1273 | if commitinfo is None: | |||
|
1274 | commitinfo = {} | |||
|
1275 | ||||
|
1276 | for f, op in pycompat.iteritems(commitinfo): | |||
|
1277 | # the other side's filenode was chosen while merging, store this in | |||
|
1278 | # mergestate so that it can be reused on commit | |||
|
1279 | if op == b'other': | |||
|
1280 | ms.addmergedother(f) | |||
|
1281 | ||||
1264 | # add ACTION_GET_OTHER_AND_STORE to mergestate |
|
1282 | # add ACTION_GET_OTHER_AND_STORE to mergestate | |
1265 | for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]: |
|
1283 | for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]: | |
1266 | ms.addmergedother(e[0]) |
|
1284 | ms.addmergedother(e[0]) | |
1267 |
|
1285 | |||
1268 | moves = [] |
|
1286 | moves = [] | |
1269 | for m, l in actions.items(): |
|
1287 | for m, l in actions.items(): | |
1270 | l.sort() |
|
1288 | l.sort() | |
1271 |
|
1289 | |||
1272 | # 'cd' and 'dc' actions are treated like other merge conflicts |
|
1290 | # 'cd' and 'dc' actions are treated like other merge conflicts | |
1273 | mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED]) |
|
1291 | mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED]) | |
1274 | mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED])) |
|
1292 | mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED])) | |
1275 | mergeactions.extend(actions[mergestatemod.ACTION_MERGE]) |
|
1293 | mergeactions.extend(actions[mergestatemod.ACTION_MERGE]) | |
1276 | for f, args, msg in mergeactions: |
|
1294 | for f, args, msg in mergeactions: | |
1277 | f1, f2, fa, move, anc = args |
|
1295 | f1, f2, fa, move, anc = args | |
1278 | if f == b'.hgsubstate': # merged internally |
|
1296 | if f == b'.hgsubstate': # merged internally | |
1279 | continue |
|
1297 | continue | |
1280 | if f1 is None: |
|
1298 | if f1 is None: | |
1281 | fcl = filemerge.absentfilectx(wctx, fa) |
|
1299 | fcl = filemerge.absentfilectx(wctx, fa) | |
1282 | else: |
|
1300 | else: | |
1283 | repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f)) |
|
1301 | repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f)) | |
1284 | fcl = wctx[f1] |
|
1302 | fcl = wctx[f1] | |
1285 | if f2 is None: |
|
1303 | if f2 is None: | |
1286 | fco = filemerge.absentfilectx(mctx, fa) |
|
1304 | fco = filemerge.absentfilectx(mctx, fa) | |
1287 | else: |
|
1305 | else: | |
1288 | fco = mctx[f2] |
|
1306 | fco = mctx[f2] | |
1289 | actx = repo[anc] |
|
1307 | actx = repo[anc] | |
1290 | if fa in actx: |
|
1308 | if fa in actx: | |
1291 | fca = actx[fa] |
|
1309 | fca = actx[fa] | |
1292 | else: |
|
1310 | else: | |
1293 | # TODO: move to absentfilectx |
|
1311 | # TODO: move to absentfilectx | |
1294 | fca = repo.filectx(f1, fileid=nullrev) |
|
1312 | fca = repo.filectx(f1, fileid=nullrev) | |
1295 | ms.add(fcl, fco, fca, f) |
|
1313 | ms.add(fcl, fco, fca, f) | |
1296 | if f1 != f and move: |
|
1314 | if f1 != f and move: | |
1297 | moves.append(f1) |
|
1315 | moves.append(f1) | |
1298 |
|
1316 | |||
1299 | # remove renamed files after safely stored |
|
1317 | # remove renamed files after safely stored | |
1300 | for f in moves: |
|
1318 | for f in moves: | |
1301 | if wctx[f].lexists(): |
|
1319 | if wctx[f].lexists(): | |
1302 | repo.ui.debug(b"removing %s\n" % f) |
|
1320 | repo.ui.debug(b"removing %s\n" % f) | |
1303 | wctx[f].audit() |
|
1321 | wctx[f].audit() | |
1304 | wctx[f].remove() |
|
1322 | wctx[f].remove() | |
1305 |
|
1323 | |||
1306 | numupdates = sum( |
|
1324 | numupdates = sum( | |
1307 | len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP |
|
1325 | len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP | |
1308 | ) |
|
1326 | ) | |
1309 | progress = repo.ui.makeprogress( |
|
1327 | progress = repo.ui.makeprogress( | |
1310 | _(b'updating'), unit=_(b'files'), total=numupdates |
|
1328 | _(b'updating'), unit=_(b'files'), total=numupdates | |
1311 | ) |
|
1329 | ) | |
1312 |
|
1330 | |||
1313 | if [ |
|
1331 | if [ | |
1314 | a |
|
1332 | a | |
1315 | for a in actions[mergestatemod.ACTION_REMOVE] |
|
1333 | for a in actions[mergestatemod.ACTION_REMOVE] | |
1316 | if a[0] == b'.hgsubstate' |
|
1334 | if a[0] == b'.hgsubstate' | |
1317 | ]: |
|
1335 | ]: | |
1318 | subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) |
|
1336 | subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) | |
1319 |
|
1337 | |||
1320 | # record path conflicts |
|
1338 | # record path conflicts | |
1321 | for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]: |
|
1339 | for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]: | |
1322 | f1, fo = args |
|
1340 | f1, fo = args | |
1323 | s = repo.ui.status |
|
1341 | s = repo.ui.status | |
1324 | s( |
|
1342 | s( | |
1325 | _( |
|
1343 | _( | |
1326 | b"%s: path conflict - a file or link has the same name as a " |
|
1344 | b"%s: path conflict - a file or link has the same name as a " | |
1327 | b"directory\n" |
|
1345 | b"directory\n" | |
1328 | ) |
|
1346 | ) | |
1329 | % f |
|
1347 | % f | |
1330 | ) |
|
1348 | ) | |
1331 | if fo == b'l': |
|
1349 | if fo == b'l': | |
1332 | s(_(b"the local file has been renamed to %s\n") % f1) |
|
1350 | s(_(b"the local file has been renamed to %s\n") % f1) | |
1333 | else: |
|
1351 | else: | |
1334 | s(_(b"the remote file has been renamed to %s\n") % f1) |
|
1352 | s(_(b"the remote file has been renamed to %s\n") % f1) | |
1335 | s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f) |
|
1353 | s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f) | |
1336 | ms.addpathconflict(f, f1, fo) |
|
1354 | ms.addpathconflict(f, f1, fo) | |
1337 | progress.increment(item=f) |
|
1355 | progress.increment(item=f) | |
1338 |
|
1356 | |||
1339 | # When merging in-memory, we can't support worker processes, so set the |
|
1357 | # When merging in-memory, we can't support worker processes, so set the | |
1340 | # per-item cost at 0 in that case. |
|
1358 | # per-item cost at 0 in that case. | |
1341 | cost = 0 if wctx.isinmemory() else 0.001 |
|
1359 | cost = 0 if wctx.isinmemory() else 0.001 | |
1342 |
|
1360 | |||
1343 | # remove in parallel (must come before resolving path conflicts and getting) |
|
1361 | # remove in parallel (must come before resolving path conflicts and getting) | |
1344 | prog = worker.worker( |
|
1362 | prog = worker.worker( | |
1345 | repo.ui, |
|
1363 | repo.ui, | |
1346 | cost, |
|
1364 | cost, | |
1347 | batchremove, |
|
1365 | batchremove, | |
1348 | (repo, wctx), |
|
1366 | (repo, wctx), | |
1349 | actions[mergestatemod.ACTION_REMOVE], |
|
1367 | actions[mergestatemod.ACTION_REMOVE], | |
1350 | ) |
|
1368 | ) | |
1351 | for i, item in prog: |
|
1369 | for i, item in prog: | |
1352 | progress.increment(step=i, item=item) |
|
1370 | progress.increment(step=i, item=item) | |
1353 | removed = len(actions[mergestatemod.ACTION_REMOVE]) |
|
1371 | removed = len(actions[mergestatemod.ACTION_REMOVE]) | |
1354 |
|
1372 | |||
1355 | # resolve path conflicts (must come before getting) |
|
1373 | # resolve path conflicts (must come before getting) | |
1356 | for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]: |
|
1374 | for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]: | |
1357 | repo.ui.debug(b" %s: %s -> pr\n" % (f, msg)) |
|
1375 | repo.ui.debug(b" %s: %s -> pr\n" % (f, msg)) | |
1358 | (f0, origf0) = args |
|
1376 | (f0, origf0) = args | |
1359 | if wctx[f0].lexists(): |
|
1377 | if wctx[f0].lexists(): | |
1360 | repo.ui.note(_(b"moving %s to %s\n") % (f0, f)) |
|
1378 | repo.ui.note(_(b"moving %s to %s\n") % (f0, f)) | |
1361 | wctx[f].audit() |
|
1379 | wctx[f].audit() | |
1362 | wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags()) |
|
1380 | wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags()) | |
1363 | wctx[f0].remove() |
|
1381 | wctx[f0].remove() | |
1364 | progress.increment(item=f) |
|
1382 | progress.increment(item=f) | |
1365 |
|
1383 | |||
1366 | # get in parallel. |
|
1384 | # get in parallel. | |
1367 | threadsafe = repo.ui.configbool( |
|
1385 | threadsafe = repo.ui.configbool( | |
1368 | b'experimental', b'worker.wdir-get-thread-safe' |
|
1386 | b'experimental', b'worker.wdir-get-thread-safe' | |
1369 | ) |
|
1387 | ) | |
1370 | prog = worker.worker( |
|
1388 | prog = worker.worker( | |
1371 | repo.ui, |
|
1389 | repo.ui, | |
1372 | cost, |
|
1390 | cost, | |
1373 | batchget, |
|
1391 | batchget, | |
1374 | (repo, mctx, wctx, wantfiledata), |
|
1392 | (repo, mctx, wctx, wantfiledata), | |
1375 | actions[mergestatemod.ACTION_GET], |
|
1393 | actions[mergestatemod.ACTION_GET], | |
1376 | threadsafe=threadsafe, |
|
1394 | threadsafe=threadsafe, | |
1377 | hasretval=True, |
|
1395 | hasretval=True, | |
1378 | ) |
|
1396 | ) | |
1379 | getfiledata = {} |
|
1397 | getfiledata = {} | |
1380 | for final, res in prog: |
|
1398 | for final, res in prog: | |
1381 | if final: |
|
1399 | if final: | |
1382 | getfiledata = res |
|
1400 | getfiledata = res | |
1383 | else: |
|
1401 | else: | |
1384 | i, item = res |
|
1402 | i, item = res | |
1385 | progress.increment(step=i, item=item) |
|
1403 | progress.increment(step=i, item=item) | |
1386 | updated = len(actions[mergestatemod.ACTION_GET]) |
|
1404 | updated = len(actions[mergestatemod.ACTION_GET]) | |
1387 |
|
1405 | |||
1388 | if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']: |
|
1406 | if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']: | |
1389 | subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) |
|
1407 | subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) | |
1390 |
|
1408 | |||
1391 | # forget (manifest only, just log it) (must come first) |
|
1409 | # forget (manifest only, just log it) (must come first) | |
1392 | for f, args, msg in actions[mergestatemod.ACTION_FORGET]: |
|
1410 | for f, args, msg in actions[mergestatemod.ACTION_FORGET]: | |
1393 | repo.ui.debug(b" %s: %s -> f\n" % (f, msg)) |
|
1411 | repo.ui.debug(b" %s: %s -> f\n" % (f, msg)) | |
1394 | progress.increment(item=f) |
|
1412 | progress.increment(item=f) | |
1395 |
|
1413 | |||
1396 | # re-add (manifest only, just log it) |
|
1414 | # re-add (manifest only, just log it) | |
1397 | for f, args, msg in actions[mergestatemod.ACTION_ADD]: |
|
1415 | for f, args, msg in actions[mergestatemod.ACTION_ADD]: | |
1398 | repo.ui.debug(b" %s: %s -> a\n" % (f, msg)) |
|
1416 | repo.ui.debug(b" %s: %s -> a\n" % (f, msg)) | |
1399 | progress.increment(item=f) |
|
1417 | progress.increment(item=f) | |
1400 |
|
1418 | |||
1401 | # re-add/mark as modified (manifest only, just log it) |
|
1419 | # re-add/mark as modified (manifest only, just log it) | |
1402 | for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]: |
|
1420 | for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]: | |
1403 | repo.ui.debug(b" %s: %s -> am\n" % (f, msg)) |
|
1421 | repo.ui.debug(b" %s: %s -> am\n" % (f, msg)) | |
1404 | progress.increment(item=f) |
|
1422 | progress.increment(item=f) | |
1405 |
|
1423 | |||
1406 | # keep (noop, just log it) |
|
1424 | # keep (noop, just log it) | |
1407 | for f, args, msg in actions[mergestatemod.ACTION_KEEP]: |
|
1425 | for f, args, msg in actions[mergestatemod.ACTION_KEEP]: | |
1408 | repo.ui.debug(b" %s: %s -> k\n" % (f, msg)) |
|
1426 | repo.ui.debug(b" %s: %s -> k\n" % (f, msg)) | |
1409 | # no progress |
|
1427 | # no progress | |
1410 |
|
1428 | |||
1411 | # directory rename, move local |
|
1429 | # directory rename, move local | |
1412 | for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]: |
|
1430 | for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]: | |
1413 | repo.ui.debug(b" %s: %s -> dm\n" % (f, msg)) |
|
1431 | repo.ui.debug(b" %s: %s -> dm\n" % (f, msg)) | |
1414 | progress.increment(item=f) |
|
1432 | progress.increment(item=f) | |
1415 | f0, flags = args |
|
1433 | f0, flags = args | |
1416 | repo.ui.note(_(b"moving %s to %s\n") % (f0, f)) |
|
1434 | repo.ui.note(_(b"moving %s to %s\n") % (f0, f)) | |
1417 | wctx[f].audit() |
|
1435 | wctx[f].audit() | |
1418 | wctx[f].write(wctx.filectx(f0).data(), flags) |
|
1436 | wctx[f].write(wctx.filectx(f0).data(), flags) | |
1419 | wctx[f0].remove() |
|
1437 | wctx[f0].remove() | |
1420 | updated += 1 |
|
1438 | updated += 1 | |
1421 |
|
1439 | |||
1422 | # local directory rename, get |
|
1440 | # local directory rename, get | |
1423 | for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]: |
|
1441 | for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]: | |
1424 | repo.ui.debug(b" %s: %s -> dg\n" % (f, msg)) |
|
1442 | repo.ui.debug(b" %s: %s -> dg\n" % (f, msg)) | |
1425 | progress.increment(item=f) |
|
1443 | progress.increment(item=f) | |
1426 | f0, flags = args |
|
1444 | f0, flags = args | |
1427 | repo.ui.note(_(b"getting %s to %s\n") % (f0, f)) |
|
1445 | repo.ui.note(_(b"getting %s to %s\n") % (f0, f)) | |
1428 | wctx[f].write(mctx.filectx(f0).data(), flags) |
|
1446 | wctx[f].write(mctx.filectx(f0).data(), flags) | |
1429 | updated += 1 |
|
1447 | updated += 1 | |
1430 |
|
1448 | |||
1431 | # exec |
|
1449 | # exec | |
1432 | for f, args, msg in actions[mergestatemod.ACTION_EXEC]: |
|
1450 | for f, args, msg in actions[mergestatemod.ACTION_EXEC]: | |
1433 | repo.ui.debug(b" %s: %s -> e\n" % (f, msg)) |
|
1451 | repo.ui.debug(b" %s: %s -> e\n" % (f, msg)) | |
1434 | progress.increment(item=f) |
|
1452 | progress.increment(item=f) | |
1435 | (flags,) = args |
|
1453 | (flags,) = args | |
1436 | wctx[f].audit() |
|
1454 | wctx[f].audit() | |
1437 | wctx[f].setflags(b'l' in flags, b'x' in flags) |
|
1455 | wctx[f].setflags(b'l' in flags, b'x' in flags) | |
1438 | updated += 1 |
|
1456 | updated += 1 | |
1439 |
|
1457 | |||
1440 | # the ordering is important here -- ms.mergedriver will raise if the merge |
|
1458 | # the ordering is important here -- ms.mergedriver will raise if the merge | |
1441 | # driver has changed, and we want to be able to bypass it when overwrite is |
|
1459 | # driver has changed, and we want to be able to bypass it when overwrite is | |
1442 | # True |
|
1460 | # True | |
1443 | usemergedriver = not overwrite and mergeactions and ms.mergedriver |
|
1461 | usemergedriver = not overwrite and mergeactions and ms.mergedriver | |
1444 |
|
1462 | |||
1445 | if usemergedriver: |
|
1463 | if usemergedriver: | |
1446 | if wctx.isinmemory(): |
|
1464 | if wctx.isinmemory(): | |
1447 | raise error.InMemoryMergeConflictsError( |
|
1465 | raise error.InMemoryMergeConflictsError( | |
1448 | b"in-memory merge does not support mergedriver" |
|
1466 | b"in-memory merge does not support mergedriver" | |
1449 | ) |
|
1467 | ) | |
1450 | ms.commit() |
|
1468 | ms.commit() | |
1451 | proceed = driverpreprocess(repo, ms, wctx, labels=labels) |
|
1469 | proceed = driverpreprocess(repo, ms, wctx, labels=labels) | |
1452 | # the driver might leave some files unresolved |
|
1470 | # the driver might leave some files unresolved | |
1453 | unresolvedf = set(ms.unresolved()) |
|
1471 | unresolvedf = set(ms.unresolved()) | |
1454 | if not proceed: |
|
1472 | if not proceed: | |
1455 | # XXX setting unresolved to at least 1 is a hack to make sure we |
|
1473 | # XXX setting unresolved to at least 1 is a hack to make sure we | |
1456 | # error out |
|
1474 | # error out | |
1457 | return updateresult( |
|
1475 | return updateresult( | |
1458 | updated, merged, removed, max(len(unresolvedf), 1) |
|
1476 | updated, merged, removed, max(len(unresolvedf), 1) | |
1459 | ) |
|
1477 | ) | |
1460 | newactions = [] |
|
1478 | newactions = [] | |
1461 | for f, args, msg in mergeactions: |
|
1479 | for f, args, msg in mergeactions: | |
1462 | if f in unresolvedf: |
|
1480 | if f in unresolvedf: | |
1463 | newactions.append((f, args, msg)) |
|
1481 | newactions.append((f, args, msg)) | |
1464 | mergeactions = newactions |
|
1482 | mergeactions = newactions | |
1465 |
|
1483 | |||
1466 | try: |
|
1484 | try: | |
1467 | # premerge |
|
1485 | # premerge | |
1468 | tocomplete = [] |
|
1486 | tocomplete = [] | |
1469 | for f, args, msg in mergeactions: |
|
1487 | for f, args, msg in mergeactions: | |
1470 | repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg)) |
|
1488 | repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg)) | |
1471 | progress.increment(item=f) |
|
1489 | progress.increment(item=f) | |
1472 | if f == b'.hgsubstate': # subrepo states need updating |
|
1490 | if f == b'.hgsubstate': # subrepo states need updating | |
1473 | subrepoutil.submerge( |
|
1491 | subrepoutil.submerge( | |
1474 | repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels |
|
1492 | repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels | |
1475 | ) |
|
1493 | ) | |
1476 | continue |
|
1494 | continue | |
1477 | wctx[f].audit() |
|
1495 | wctx[f].audit() | |
1478 | complete, r = ms.preresolve(f, wctx) |
|
1496 | complete, r = ms.preresolve(f, wctx) | |
1479 | if not complete: |
|
1497 | if not complete: | |
1480 | numupdates += 1 |
|
1498 | numupdates += 1 | |
1481 | tocomplete.append((f, args, msg)) |
|
1499 | tocomplete.append((f, args, msg)) | |
1482 |
|
1500 | |||
1483 | # merge |
|
1501 | # merge | |
1484 | for f, args, msg in tocomplete: |
|
1502 | for f, args, msg in tocomplete: | |
1485 | repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg)) |
|
1503 | repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg)) | |
1486 | progress.increment(item=f, total=numupdates) |
|
1504 | progress.increment(item=f, total=numupdates) | |
1487 | ms.resolve(f, wctx) |
|
1505 | ms.resolve(f, wctx) | |
1488 |
|
1506 | |||
1489 | finally: |
|
1507 | finally: | |
1490 | ms.commit() |
|
1508 | ms.commit() | |
1491 |
|
1509 | |||
1492 | unresolved = ms.unresolvedcount() |
|
1510 | unresolved = ms.unresolvedcount() | |
1493 |
|
1511 | |||
1494 | if ( |
|
1512 | if ( | |
1495 | usemergedriver |
|
1513 | usemergedriver | |
1496 | and not unresolved |
|
1514 | and not unresolved | |
1497 | and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS |
|
1515 | and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS | |
1498 | ): |
|
1516 | ): | |
1499 | if not driverconclude(repo, ms, wctx, labels=labels): |
|
1517 | if not driverconclude(repo, ms, wctx, labels=labels): | |
1500 | # XXX setting unresolved to at least 1 is a hack to make sure we |
|
1518 | # XXX setting unresolved to at least 1 is a hack to make sure we | |
1501 | # error out |
|
1519 | # error out | |
1502 | unresolved = max(unresolved, 1) |
|
1520 | unresolved = max(unresolved, 1) | |
1503 |
|
1521 | |||
1504 | ms.commit() |
|
1522 | ms.commit() | |
1505 |
|
1523 | |||
1506 | msupdated, msmerged, msremoved = ms.counts() |
|
1524 | msupdated, msmerged, msremoved = ms.counts() | |
1507 | updated += msupdated |
|
1525 | updated += msupdated | |
1508 | merged += msmerged |
|
1526 | merged += msmerged | |
1509 | removed += msremoved |
|
1527 | removed += msremoved | |
1510 |
|
1528 | |||
1511 | extraactions = ms.actions() |
|
1529 | extraactions = ms.actions() | |
1512 | if extraactions: |
|
1530 | if extraactions: | |
1513 | mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]} |
|
1531 | mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]} | |
1514 | for k, acts in pycompat.iteritems(extraactions): |
|
1532 | for k, acts in pycompat.iteritems(extraactions): | |
1515 | actions[k].extend(acts) |
|
1533 | actions[k].extend(acts) | |
1516 | if k == mergestatemod.ACTION_GET and wantfiledata: |
|
1534 | if k == mergestatemod.ACTION_GET and wantfiledata: | |
1517 | # no filedata until mergestate is updated to provide it |
|
1535 | # no filedata until mergestate is updated to provide it | |
1518 | for a in acts: |
|
1536 | for a in acts: | |
1519 | getfiledata[a[0]] = None |
|
1537 | getfiledata[a[0]] = None | |
1520 | # Remove these files from actions[ACTION_MERGE] as well. This is |
|
1538 | # Remove these files from actions[ACTION_MERGE] as well. This is | |
1521 | # important because in recordupdates, files in actions[ACTION_MERGE] |
|
1539 | # important because in recordupdates, files in actions[ACTION_MERGE] | |
1522 | # are processed after files in other actions, and the merge driver |
|
1540 | # are processed after files in other actions, and the merge driver | |
1523 | # might add files to those actions via extraactions above. This can |
|
1541 | # might add files to those actions via extraactions above. This can | |
1524 | # lead to a file being recorded twice, with poor results. This is |
|
1542 | # lead to a file being recorded twice, with poor results. This is | |
1525 | # especially problematic for actions[ACTION_REMOVE] (currently only |
|
1543 | # especially problematic for actions[ACTION_REMOVE] (currently only | |
1526 | # possible with the merge driver in the initial merge process; |
|
1544 | # possible with the merge driver in the initial merge process; | |
1527 | # interrupted merges don't go through this flow). |
|
1545 | # interrupted merges don't go through this flow). | |
1528 | # |
|
1546 | # | |
1529 | # The real fix here is to have indexes by both file and action so |
|
1547 | # The real fix here is to have indexes by both file and action so | |
1530 | # that when the action for a file is changed it is automatically |
|
1548 | # that when the action for a file is changed it is automatically | |
1531 | # reflected in the other action lists. But that involves a more |
|
1549 | # reflected in the other action lists. But that involves a more | |
1532 | # complex data structure, so this will do for now. |
|
1550 | # complex data structure, so this will do for now. | |
1533 | # |
|
1551 | # | |
1534 | # We don't need to do the same operation for 'dc' and 'cd' because |
|
1552 | # We don't need to do the same operation for 'dc' and 'cd' because | |
1535 | # those lists aren't consulted again. |
|
1553 | # those lists aren't consulted again. | |
1536 | mfiles.difference_update(a[0] for a in acts) |
|
1554 | mfiles.difference_update(a[0] for a in acts) | |
1537 |
|
1555 | |||
1538 | actions[mergestatemod.ACTION_MERGE] = [ |
|
1556 | actions[mergestatemod.ACTION_MERGE] = [ | |
1539 | a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles |
|
1557 | a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles | |
1540 | ] |
|
1558 | ] | |
1541 |
|
1559 | |||
1542 | progress.complete() |
|
1560 | progress.complete() | |
1543 | assert len(getfiledata) == ( |
|
1561 | assert len(getfiledata) == ( | |
1544 | len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0 |
|
1562 | len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0 | |
1545 | ) |
|
1563 | ) | |
1546 | return updateresult(updated, merged, removed, unresolved), getfiledata |
|
1564 | return updateresult(updated, merged, removed, unresolved), getfiledata | |
1547 |
|
1565 | |||
1548 |
|
1566 | |||
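The new commitinfo parameter threads information from the merge calculation into the merge state so it is still available at commit time. As of this change only one marker is consumed: entries whose value is b'other' make applyupdates() call ms.addmergedother() for that file. A repository-free sketch of that filtering step (the second marker value is made up purely to show that anything else is ignored):

    commitinfo = {
        b'foo.txt': b'other',        # other side's filenode chosen during merge
        b'bar.txt': b'placeholder',  # hypothetical marker; the loop skips it
    }

    remembered = []
    for f, op in sorted(commitinfo.items()):
        if op == b'other':
            remembered.append(f)     # stands in for ms.addmergedother(f)

    print(remembered)                # [b'foo.txt']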
1549 | def _advertisefsmonitor(repo, num_gets, p1node): |
|
1567 | def _advertisefsmonitor(repo, num_gets, p1node): | |
1550 | # Advertise fsmonitor when its presence could be useful. |
|
1568 | # Advertise fsmonitor when its presence could be useful. | |
1551 | # |
|
1569 | # | |
1552 | # We only advertise when performing an update from an empty working |
|
1570 | # We only advertise when performing an update from an empty working | |
1553 | # directory. This typically only occurs during initial clone. |
|
1571 | # directory. This typically only occurs during initial clone. | |
1554 | # |
|
1572 | # | |
1555 | # We give users a mechanism to disable the warning in case it is |
|
1573 | # We give users a mechanism to disable the warning in case it is | |
1556 | # annoying. |
|
1574 | # annoying. | |
1557 | # |
|
1575 | # | |
1558 | # We only allow on Linux and MacOS because that's where fsmonitor is |
|
1576 | # We only allow on Linux and MacOS because that's where fsmonitor is | |
1559 | # considered stable. |
|
1577 | # considered stable. | |
1560 | fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused') |
|
1578 | fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused') | |
1561 | fsmonitorthreshold = repo.ui.configint( |
|
1579 | fsmonitorthreshold = repo.ui.configint( | |
1562 | b'fsmonitor', b'warn_update_file_count' |
|
1580 | b'fsmonitor', b'warn_update_file_count' | |
1563 | ) |
|
1581 | ) | |
1564 | try: |
|
1582 | try: | |
1565 | # avoid cycle: extensions -> cmdutil -> merge |
|
1583 | # avoid cycle: extensions -> cmdutil -> merge | |
1566 | from . import extensions |
|
1584 | from . import extensions | |
1567 |
|
1585 | |||
1568 | extensions.find(b'fsmonitor') |
|
1586 | extensions.find(b'fsmonitor') | |
1569 | fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off' |
|
1587 | fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off' | |
1570 | # We intentionally don't look at whether fsmonitor has disabled |
|
1588 | # We intentionally don't look at whether fsmonitor has disabled | |
1571 | # itself because a) fsmonitor may have already printed a warning |
|
1589 | # itself because a) fsmonitor may have already printed a warning | |
1572 | # b) we only care about the config state here. |
|
1590 | # b) we only care about the config state here. | |
1573 | except KeyError: |
|
1591 | except KeyError: | |
1574 | fsmonitorenabled = False |
|
1592 | fsmonitorenabled = False | |
1575 |
|
1593 | |||
1576 | if ( |
|
1594 | if ( | |
1577 | fsmonitorwarning |
|
1595 | fsmonitorwarning | |
1578 | and not fsmonitorenabled |
|
1596 | and not fsmonitorenabled | |
1579 | and p1node == nullid |
|
1597 | and p1node == nullid | |
1580 | and num_gets >= fsmonitorthreshold |
|
1598 | and num_gets >= fsmonitorthreshold | |
1581 | and pycompat.sysplatform.startswith((b'linux', b'darwin')) |
|
1599 | and pycompat.sysplatform.startswith((b'linux', b'darwin')) | |
1582 | ): |
|
1600 | ): | |
1583 | repo.ui.warn( |
|
1601 | repo.ui.warn( | |
1584 | _( |
|
1602 | _( | |
1585 | b'(warning: large working directory being used without ' |
|
1603 | b'(warning: large working directory being used without ' | |
1586 | b'fsmonitor enabled; enable fsmonitor to improve performance; ' |
|
1604 | b'fsmonitor enabled; enable fsmonitor to improve performance; ' | |
1587 | b'see "hg help -e fsmonitor")\n' |
|
1605 | b'see "hg help -e fsmonitor")\n' | |
1588 | ) |
|
1606 | ) | |
1589 | ) |
|
1607 | ) | |
1590 |
|
1608 | |||
1591 |
|
1609 | |||
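The fsmonitor hint above is deliberately narrow. A standalone sketch of the same gate; the 50000-file threshold is the config default as far as I know, so treat it (and the helper name) as assumptions:

    import sys

    def should_advertise_fsmonitor(num_gets, p1_is_null, warn_enabled=True,
                                   fsmonitor_enabled=False, threshold=50000):
        # Warn only on an initial checkout, for large file counts, when the
        # warning is enabled, fsmonitor is off, and the platform is supported.
        return (
            warn_enabled
            and not fsmonitor_enabled
            and p1_is_null
            and num_gets >= threshold
            and sys.platform.startswith(('linux', 'darwin'))
        )

    print(should_advertise_fsmonitor(100000, True))   # True on Linux/macOS
    print(should_advertise_fsmonitor(100, True))      # False: too few files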
1592 | UPDATECHECK_ABORT = b'abort' # handled at higher layers |
|
1610 | UPDATECHECK_ABORT = b'abort' # handled at higher layers | |
1593 | UPDATECHECK_NONE = b'none' |
|
1611 | UPDATECHECK_NONE = b'none' | |
1594 | UPDATECHECK_LINEAR = b'linear' |
|
1612 | UPDATECHECK_LINEAR = b'linear' | |
1595 | UPDATECHECK_NO_CONFLICT = b'noconflict' |
|
1613 | UPDATECHECK_NO_CONFLICT = b'noconflict' | |
1596 |
|
1614 | |||
1597 |
|
1615 | |||
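update() validates its updatecheck argument against the constants above, defaulting to 'linear' when neither branchmerge nor force is set. A standalone sketch of that validation (constant values copied from the listing; the helper name is made up):

    UPDATECHECK_NONE = b'none'
    UPDATECHECK_LINEAR = b'linear'
    UPDATECHECK_NO_CONFLICT = b'noconflict'

    def resolve_updatecheck(updatecheck):
        allowed = (UPDATECHECK_NONE, UPDATECHECK_LINEAR, UPDATECHECK_NO_CONFLICT)
        if updatecheck is None:
            return UPDATECHECK_LINEAR     # default when not merging or forcing
        if updatecheck not in allowed:
            raise ValueError(
                'Invalid updatecheck %r (can accept %r)' % (updatecheck, allowed)
            )
        return updatecheck

    print(resolve_updatecheck(None))                 # b'linear'
    print(resolve_updatecheck(UPDATECHECK_NONE))     # b'none'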
1598 | def update( |
|
1616 | def update( | |
1599 | repo, |
|
1617 | repo, | |
1600 | node, |
|
1618 | node, | |
1601 | branchmerge, |
|
1619 | branchmerge, | |
1602 | force, |
|
1620 | force, | |
1603 | ancestor=None, |
|
1621 | ancestor=None, | |
1604 | mergeancestor=False, |
|
1622 | mergeancestor=False, | |
1605 | labels=None, |
|
1623 | labels=None, | |
1606 | matcher=None, |
|
1624 | matcher=None, | |
1607 | mergeforce=False, |
|
1625 | mergeforce=False, | |
1608 | updatedirstate=True, |
|
1626 | updatedirstate=True, | |
1609 | updatecheck=None, |
|
1627 | updatecheck=None, | |
1610 | wc=None, |
|
1628 | wc=None, | |
1611 | ): |
|
1629 | ): | |
1612 | """ |
|
1630 | """ | |
1613 | Perform a merge between the working directory and the given node |
|
1631 | Perform a merge between the working directory and the given node | |
1614 |
|
1632 | |||
1615 | node = the node to update to |
|
1633 | node = the node to update to | |
1616 | branchmerge = whether to merge between branches |
|
1634 | branchmerge = whether to merge between branches | |
1617 | force = whether to force branch merging or file overwriting |
|
1635 | force = whether to force branch merging or file overwriting | |
1618 | matcher = a matcher to filter file lists (dirstate not updated) |
|
1636 | matcher = a matcher to filter file lists (dirstate not updated) | |
1619 | mergeancestor = whether it is merging with an ancestor. If true, |
|
1637 | mergeancestor = whether it is merging with an ancestor. If true, | |
1620 | we should accept the incoming changes for any prompts that occur. |
|
1638 | we should accept the incoming changes for any prompts that occur. | |
1621 | If false, merging with an ancestor (fast-forward) is only allowed |
|
1639 | If false, merging with an ancestor (fast-forward) is only allowed | |
1622 | between different named branches. This flag is used by rebase extension |
|
1640 | between different named branches. This flag is used by rebase extension | |
1623 | as a temporary fix and should be avoided in general. |
|
1641 | as a temporary fix and should be avoided in general. | |
1624 | labels = labels to use for base, local and other |
|
1642 | labels = labels to use for base, local and other | |
1625 | mergeforce = whether the merge was run with 'merge --force' (deprecated): if |
|
1643 | mergeforce = whether the merge was run with 'merge --force' (deprecated): if | |
1626 | this is True, then 'force' should be True as well. |
|
1644 | this is True, then 'force' should be True as well. | |
1627 |
|
1645 | |||
1628 | The table below shows all the behaviors of the update command given the |
|
1646 | The table below shows all the behaviors of the update command given the | |
1629 | -c/--check and -C/--clean or no options, whether the working directory is |
|
1647 | -c/--check and -C/--clean or no options, whether the working directory is | |
1630 | dirty, whether a revision is specified, and the relationship of the parent |
|
1648 | dirty, whether a revision is specified, and the relationship of the parent | |
1631 | rev to the target rev (linear or not). Match from top first. The -n |
|
1649 | rev to the target rev (linear or not). Match from top first. The -n | |
1632 | option doesn't exist on the command line, but represents the |
|
1650 | option doesn't exist on the command line, but represents the | |
1633 | experimental.updatecheck=noconflict option. |
|
1651 | experimental.updatecheck=noconflict option. | |
1634 |
|
1652 | |||
1635 | This logic is tested by test-update-branches.t. |
|
1653 | This logic is tested by test-update-branches.t. | |
1636 |
|
1654 | |||
1637 | -c -C -n -m dirty rev linear | result |
|
1655 | -c -C -n -m dirty rev linear | result | |
1638 | y y * * * * * | (1) |
|
1656 | y y * * * * * | (1) | |
1639 | y * y * * * * | (1) |
|
1657 | y * y * * * * | (1) | |
1640 | y * * y * * * | (1) |
|
1658 | y * * y * * * | (1) | |
1641 | * y y * * * * | (1) |
|
1659 | * y y * * * * | (1) | |
1642 | * y * y * * * | (1) |
|
1660 | * y * y * * * | (1) | |
1643 | * * y y * * * | (1) |
|
1661 | * * y y * * * | (1) | |
1644 | * * * * * n n | x |
|
1662 | * * * * * n n | x | |
1645 | * * * * n * * | ok |
|
1663 | * * * * n * * | ok | |
1646 | n n n n y * y | merge |
|
1664 | n n n n y * y | merge | |
1647 | n n n n y y n | (2) |
|
1665 | n n n n y y n | (2) | |
1648 | n n n y y * * | merge |
|
1666 | n n n y y * * | merge | |
1649 | n n y n y * * | merge if no conflict |
|
1667 | n n y n y * * | merge if no conflict | |
1650 | n y n n y * * | discard |
|
1668 | n y n n y * * | discard | |
1651 | y n n n y * * | (3) |
|
1669 | y n n n y * * | (3) | |
1652 |
|
1670 | |||
1653 | x = can't happen |
|
1671 | x = can't happen | |
1654 | * = don't-care |
|
1672 | * = don't-care | |
1655 | 1 = incompatible options (checked in commands.py) |
|
1673 | 1 = incompatible options (checked in commands.py) | |
1656 | 2 = abort: uncommitted changes (commit or update --clean to discard changes) |
|
1674 | 2 = abort: uncommitted changes (commit or update --clean to discard changes) | |
1657 | 3 = abort: uncommitted changes (checked in commands.py) |
|
1675 | 3 = abort: uncommitted changes (checked in commands.py) | |
1658 |
|
1676 | |||
1659 | The merge is performed inside ``wc``, a workingctx-like object. It defaults |
|
1677 | The merge is performed inside ``wc``, a workingctx-like object. It defaults | |
1660 | to repo[None] if None is passed. |
|
1678 | to repo[None] if None is passed. | |
1661 |
|
1679 | |||
1662 | Return the same tuple as applyupdates(). |
|
1680 | Return the same tuple as applyupdates(). | |
1663 | """ |
|
1681 | """ | |
1664 | # Avoid cycle. |
|
1682 | # Avoid cycle. | |
1665 | from . import sparse |
|
1683 | from . import sparse | |
1666 |
|
1684 | |||
1667 | # This function used to find the default destination if node was None, but |
|
1685 | # This function used to find the default destination if node was None, but | |
1668 | # that's now in destutil.py. |
|
1686 | # that's now in destutil.py. | |
1669 | assert node is not None |
|
1687 | assert node is not None | |
1670 | if not branchmerge and not force: |
|
1688 | if not branchmerge and not force: | |
1671 | # TODO: remove the default once all callers that pass branchmerge=False |
|
1689 | # TODO: remove the default once all callers that pass branchmerge=False | |
1672 | # and force=False pass a value for updatecheck. We may want to allow |
|
1690 | # and force=False pass a value for updatecheck. We may want to allow | |
1673 | # updatecheck='abort' to better support some of these callers. |
|
1691 | # updatecheck='abort' to better support some of these callers. | |
1674 | if updatecheck is None: |
|
1692 | if updatecheck is None: | |
1675 | updatecheck = UPDATECHECK_LINEAR |
|
1693 | updatecheck = UPDATECHECK_LINEAR | |
1676 | if updatecheck not in ( |
|
1694 | if updatecheck not in ( | |
1677 | UPDATECHECK_NONE, |
|
1695 | UPDATECHECK_NONE, | |
1678 | UPDATECHECK_LINEAR, |
|
1696 | UPDATECHECK_LINEAR, | |
1679 | UPDATECHECK_NO_CONFLICT, |
|
1697 | UPDATECHECK_NO_CONFLICT, | |
1680 | ): |
|
1698 | ): | |
1681 | raise ValueError( |
|
1699 | raise ValueError( | |
1682 | r'Invalid updatecheck %r (can accept %r)' |
|
1700 | r'Invalid updatecheck %r (can accept %r)' | |
1683 | % ( |
|
1701 | % ( | |
1684 | updatecheck, |
|
1702 | updatecheck, | |
1685 | ( |
|
1703 | ( | |
1686 | UPDATECHECK_NONE, |
|
1704 | UPDATECHECK_NONE, | |
1687 | UPDATECHECK_LINEAR, |
|
1705 | UPDATECHECK_LINEAR, | |
1688 | UPDATECHECK_NO_CONFLICT, |
|
1706 | UPDATECHECK_NO_CONFLICT, | |
1689 | ), |
|
1707 | ), | |
1690 | ) |
|
1708 | ) | |
1691 | ) |
|
1709 | ) | |
1692 | if wc is not None and wc.isinmemory(): |
|
1710 | if wc is not None and wc.isinmemory(): | |
1693 | maybe_wlock = util.nullcontextmanager() |
|
1711 | maybe_wlock = util.nullcontextmanager() | |
1694 | else: |
|
1712 | else: | |
1695 | maybe_wlock = repo.wlock() |
|
1713 | maybe_wlock = repo.wlock() | |
1696 | with maybe_wlock: |
|
1714 | with maybe_wlock: | |
1697 | if wc is None: |
|
1715 | if wc is None: | |
1698 | wc = repo[None] |
|
1716 | wc = repo[None] | |
1699 | pl = wc.parents() |
|
1717 | pl = wc.parents() | |
1700 | p1 = pl[0] |
|
1718 | p1 = pl[0] | |
1701 | p2 = repo[node] |
|
1719 | p2 = repo[node] | |
1702 | if ancestor is not None: |
|
1720 | if ancestor is not None: | |
1703 | pas = [repo[ancestor]] |
|
1721 | pas = [repo[ancestor]] | |
1704 | else: |
|
1722 | else: | |
1705 | if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']: |
|
1723 | if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']: | |
1706 | cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node()) |
|
1724 | cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node()) | |
1707 | pas = [repo[anc] for anc in (sorted(cahs) or [nullid])] |
|
1725 | pas = [repo[anc] for anc in (sorted(cahs) or [nullid])] | |
1708 | else: |
|
1726 | else: | |
1709 | pas = [p1.ancestor(p2, warn=branchmerge)] |
|
1727 | pas = [p1.ancestor(p2, warn=branchmerge)] | |
1710 |
|
1728 | |||
1711 | fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2) |
|
1729 | fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2) | |
1712 |
|
1730 | |||
1713 | overwrite = force and not branchmerge |
|
1731 | overwrite = force and not branchmerge | |
1714 | ### check phase |
|
1732 | ### check phase | |
1715 | if not overwrite: |
|
1733 | if not overwrite: | |
1716 | if len(pl) > 1: |
|
1734 | if len(pl) > 1: | |
1717 | raise error.Abort(_(b"outstanding uncommitted merge")) |
|
1735 | raise error.Abort(_(b"outstanding uncommitted merge")) | |
1718 | ms = mergestatemod.mergestate.read(repo) |
|
1736 | ms = mergestatemod.mergestate.read(repo) | |
1719 | if list(ms.unresolved()): |
|
1737 | if list(ms.unresolved()): | |
1720 | raise error.Abort( |
|
1738 | raise error.Abort( | |
1721 | _(b"outstanding merge conflicts"), |
|
1739 | _(b"outstanding merge conflicts"), | |
1722 | hint=_(b"use 'hg resolve' to resolve"), |
|
1740 | hint=_(b"use 'hg resolve' to resolve"), | |
1723 | ) |
|
1741 | ) | |
1724 | if branchmerge: |
|
1742 | if branchmerge: | |
1725 | if pas == [p2]: |
|
1743 | if pas == [p2]: | |
1726 | raise error.Abort( |
|
1744 | raise error.Abort( | |
1727 | _( |
|
1745 | _( | |
1728 | b"merging with a working directory ancestor" |
|
1746 | b"merging with a working directory ancestor" | |
1729 | b" has no effect" |
|
1747 | b" has no effect" | |
1730 | ) |
|
1748 | ) | |
1731 | ) |
|
1749 | ) | |
1732 | elif pas == [p1]: |
|
1750 | elif pas == [p1]: | |
1733 | if not mergeancestor and wc.branch() == p2.branch(): |
|
1751 | if not mergeancestor and wc.branch() == p2.branch(): | |
1734 | raise error.Abort( |
|
1752 | raise error.Abort( | |
1735 | _(b"nothing to merge"), |
|
1753 | _(b"nothing to merge"), | |
1736 | hint=_(b"use 'hg update' or check 'hg heads'"), |
|
1754 | hint=_(b"use 'hg update' or check 'hg heads'"), | |
1737 | ) |
|
1755 | ) | |
1738 | if not force and (wc.files() or wc.deleted()): |
|
1756 | if not force and (wc.files() or wc.deleted()): | |
1739 | raise error.Abort( |
|
1757 | raise error.Abort( | |
1740 | _(b"uncommitted changes"), |
|
1758 | _(b"uncommitted changes"), | |
1741 | hint=_(b"use 'hg status' to list changes"), |
|
1759 | hint=_(b"use 'hg status' to list changes"), | |
1742 | ) |
|
1760 | ) | |
1743 | if not wc.isinmemory(): |
|
1761 | if not wc.isinmemory(): | |
1744 | for s in sorted(wc.substate): |
|
1762 | for s in sorted(wc.substate): | |
1745 | wc.sub(s).bailifchanged() |
|
1763 | wc.sub(s).bailifchanged() | |
1746 |
|
1764 | |||
1747 | elif not overwrite: |
|
1765 | elif not overwrite: | |
1748 | if p1 == p2: # no-op update |
|
1766 | if p1 == p2: # no-op update | |
1749 | # call the hooks and exit early |
|
1767 | # call the hooks and exit early | |
1750 | repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'') |
|
1768 | repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'') | |
1751 | repo.hook(b'update', parent1=xp2, parent2=b'', error=0) |
|
1769 | repo.hook(b'update', parent1=xp2, parent2=b'', error=0) | |
1752 | return updateresult(0, 0, 0, 0) |
|
1770 | return updateresult(0, 0, 0, 0) | |
1753 |
|
1771 | |||
1754 | if updatecheck == UPDATECHECK_LINEAR and pas not in ( |
|
1772 | if updatecheck == UPDATECHECK_LINEAR and pas not in ( | |
1755 | [p1], |
|
1773 | [p1], | |
1756 | [p2], |
|
1774 | [p2], | |
1757 | ): # nonlinear |
|
1775 | ): # nonlinear | |
1758 | dirty = wc.dirty(missing=True) |
|
1776 | dirty = wc.dirty(missing=True) | |
1759 | if dirty: |
|
1777 | if dirty: | |
1760 | # Branching is a bit strange to ensure we do the minimal |
|
1778 | # Branching is a bit strange to ensure we do the minimal | |
1761 | # number of calls to obsutil.foreground. |
|
1779 | # number of calls to obsutil.foreground. | |
1762 | foreground = obsutil.foreground(repo, [p1.node()]) |
|
1780 | foreground = obsutil.foreground(repo, [p1.node()]) | |
1763 | # note: the <node> variable contains a random identifier |
|
1781 | # note: the <node> variable contains a random identifier | |
1764 | if repo[node].node() in foreground: |
|
1782 | if repo[node].node() in foreground: | |
1765 | pass # allow updating to successors |
|
1783 | pass # allow updating to successors | |
1766 | else: |
|
1784 | else: | |
1767 | msg = _(b"uncommitted changes") |
|
1785 | msg = _(b"uncommitted changes") | |
1768 | hint = _(b"commit or update --clean to discard changes") |
|
1786 | hint = _(b"commit or update --clean to discard changes") | |
1769 | raise error.UpdateAbort(msg, hint=hint) |
|
1787 | raise error.UpdateAbort(msg, hint=hint) | |
1770 | else: |
|
1788 | else: | |
1771 | # Allow jumping branches if clean and specific rev given |
|
1789 | # Allow jumping branches if clean and specific rev given | |
1772 | pass |
|
1790 | pass | |
1773 |
|
1791 | |||
1774 | if overwrite: |
|
1792 | if overwrite: | |
1775 | pas = [wc] |
|
1793 | pas = [wc] | |
1776 | elif not branchmerge: |
|
1794 | elif not branchmerge: | |
1777 | pas = [p1] |
|
1795 | pas = [p1] | |
1778 |
|
1796 | |||
1779 | # deprecated config: merge.followcopies |
|
1797 | # deprecated config: merge.followcopies | |
1780 | followcopies = repo.ui.configbool(b'merge', b'followcopies') |
|
1798 | followcopies = repo.ui.configbool(b'merge', b'followcopies') | |
1781 | if overwrite: |
|
1799 | if overwrite: | |
1782 | followcopies = False |
|
1800 | followcopies = False | |
1783 | elif not pas[0]: |
|
1801 | elif not pas[0]: | |
1784 | followcopies = False |
|
1802 | followcopies = False | |
1785 | if not branchmerge and not wc.dirty(missing=True): |
|
1803 | if not branchmerge and not wc.dirty(missing=True): | |
1786 | followcopies = False |
|
1804 | followcopies = False | |
1787 |
|
1805 | |||
1788 | ### calculate phase |
|
1806 | ### calculate phase | |
1789 | mresult = calculateupdates( |
|
1807 | mresult = calculateupdates( | |
1790 | repo, |
|
1808 | repo, | |
1791 | wc, |
|
1809 | wc, | |
1792 | p2, |
|
1810 | p2, | |
1793 | pas, |
|
1811 | pas, | |
1794 | branchmerge, |
|
1812 | branchmerge, | |
1795 | force, |
|
1813 | force, | |
1796 | mergeancestor, |
|
1814 | mergeancestor, | |
1797 | followcopies, |
|
1815 | followcopies, | |
1798 | matcher=matcher, |
|
1816 | matcher=matcher, | |
1799 | mergeforce=mergeforce, |
|
1817 | mergeforce=mergeforce, | |
1800 | ) |
|
1818 | ) | |
1801 |
|
1819 | |||
1802 | actionbyfile = mresult.actions |
|
1820 | actionbyfile = mresult.actions | |
1803 |
|
1821 | |||
1804 | if updatecheck == UPDATECHECK_NO_CONFLICT: |
|
1822 | if updatecheck == UPDATECHECK_NO_CONFLICT: | |
1805 | for f, (m, args, msg) in pycompat.iteritems(actionbyfile): |
|
1823 | for f, (m, args, msg) in pycompat.iteritems(actionbyfile): | |
1806 | if m not in ( |
|
1824 | if m not in ( | |
1807 | mergestatemod.ACTION_GET, |
|
1825 | mergestatemod.ACTION_GET, | |
1808 | mergestatemod.ACTION_KEEP, |
|
1826 | mergestatemod.ACTION_KEEP, | |
1809 | mergestatemod.ACTION_EXEC, |
|
1827 | mergestatemod.ACTION_EXEC, | |
1810 | mergestatemod.ACTION_REMOVE, |
|
1828 | mergestatemod.ACTION_REMOVE, | |
1811 | mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, |
|
1829 | mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, | |
1812 | mergestatemod.ACTION_GET_OTHER_AND_STORE, |
|
1830 | mergestatemod.ACTION_GET_OTHER_AND_STORE, | |
1813 | ): |
|
1831 | ): | |
1814 | msg = _(b"conflicting changes") |
|
1832 | msg = _(b"conflicting changes") | |
1815 | hint = _(b"commit or update --clean to discard changes") |
|
1833 | hint = _(b"commit or update --clean to discard changes") | |
1816 | raise error.Abort(msg, hint=hint) |
|
1834 | raise error.Abort(msg, hint=hint) | |
1817 |
|
1835 | |||
1818 | # Prompt and create actions. Most of this is in the resolve phase |
|
1836 | # Prompt and create actions. Most of this is in the resolve phase | |
1819 | # already, but we can't handle .hgsubstate in filemerge or |
|
1837 | # already, but we can't handle .hgsubstate in filemerge or | |
1820 | # subrepoutil.submerge yet so we have to keep prompting for it. |
|
1838 | # subrepoutil.submerge yet so we have to keep prompting for it. | |
1821 | if b'.hgsubstate' in actionbyfile: |
|
1839 | if b'.hgsubstate' in actionbyfile: | |
1822 | f = b'.hgsubstate' |
|
1840 | f = b'.hgsubstate' | |
1823 | m, args, msg = actionbyfile[f] |
|
1841 | m, args, msg = actionbyfile[f] | |
1824 | prompts = filemerge.partextras(labels) |
|
1842 | prompts = filemerge.partextras(labels) | |
1825 | prompts[b'f'] = f |
|
1843 | prompts[b'f'] = f | |
1826 | if m == mergestatemod.ACTION_CHANGED_DELETED: |
|
1844 | if m == mergestatemod.ACTION_CHANGED_DELETED: | |
1827 | if repo.ui.promptchoice( |
|
1845 | if repo.ui.promptchoice( | |
1828 | _( |
|
1846 | _( | |
1829 | b"local%(l)s changed %(f)s which other%(o)s deleted\n" |
|
1847 | b"local%(l)s changed %(f)s which other%(o)s deleted\n" | |
1830 | b"use (c)hanged version or (d)elete?" |
|
1848 | b"use (c)hanged version or (d)elete?" | |
1831 | b"$$ &Changed $$ &Delete" |
|
1849 | b"$$ &Changed $$ &Delete" | |
1832 | ) |
|
1850 | ) | |
1833 | % prompts, |
|
1851 | % prompts, | |
1834 | 0, |
|
1852 | 0, | |
1835 | ): |
|
1853 | ): | |
1836 | actionbyfile[f] = ( |
|
1854 | actionbyfile[f] = ( | |
1837 | mergestatemod.ACTION_REMOVE, |
|
1855 | mergestatemod.ACTION_REMOVE, | |
1838 | None, |
|
1856 | None, | |
1839 | b'prompt delete', |
|
1857 | b'prompt delete', | |
1840 | ) |
|
1858 | ) | |
1841 | elif f in p1: |
|
1859 | elif f in p1: | |
1842 | actionbyfile[f] = ( |
|
1860 | actionbyfile[f] = ( | |
1843 | mergestatemod.ACTION_ADD_MODIFIED, |
|
1861 | mergestatemod.ACTION_ADD_MODIFIED, | |
1844 | None, |
|
1862 | None, | |
1845 | b'prompt keep', |
|
1863 | b'prompt keep', | |
1846 | ) |
|
1864 | ) | |
1847 | else: |
|
1865 | else: | |
1848 | actionbyfile[f] = ( |
|
1866 | actionbyfile[f] = ( | |
1849 | mergestatemod.ACTION_ADD, |
|
1867 | mergestatemod.ACTION_ADD, | |
1850 | None, |
|
1868 | None, | |
1851 | b'prompt keep', |
|
1869 | b'prompt keep', | |
1852 | ) |
|
1870 | ) | |
1853 | elif m == mergestatemod.ACTION_DELETED_CHANGED: |
|
1871 | elif m == mergestatemod.ACTION_DELETED_CHANGED: | |
1854 | f1, f2, fa, move, anc = args |
|
1872 | f1, f2, fa, move, anc = args | |
1855 | flags = p2[f2].flags() |
|
1873 | flags = p2[f2].flags() | |
1856 | if ( |
|
1874 | if ( | |
1857 | repo.ui.promptchoice( |
|
1875 | repo.ui.promptchoice( | |
1858 | _( |
|
1876 | _( | |
1859 | b"other%(o)s changed %(f)s which local%(l)s deleted\n" |
|
1877 | b"other%(o)s changed %(f)s which local%(l)s deleted\n" | |
1860 | b"use (c)hanged version or leave (d)eleted?" |
|
1878 | b"use (c)hanged version or leave (d)eleted?" | |
1861 | b"$$ &Changed $$ &Deleted" |
|
1879 | b"$$ &Changed $$ &Deleted" | |
1862 | ) |
|
1880 | ) | |
1863 | % prompts, |
|
1881 | % prompts, | |
1864 | 0, |
|
1882 | 0, | |
1865 | ) |
|
1883 | ) | |
1866 | == 0 |
|
1884 | == 0 | |
1867 | ): |
|
1885 | ): | |
1868 | actionbyfile[f] = ( |
|
1886 | actionbyfile[f] = ( | |
1869 | mergestatemod.ACTION_GET, |
|
1887 | mergestatemod.ACTION_GET, | |
1870 | (flags, False), |
|
1888 | (flags, False), | |
1871 | b'prompt recreating', |
|
1889 | b'prompt recreating', | |
1872 | ) |
|
1890 | ) | |
1873 | else: |
|
1891 | else: | |
1874 | del actionbyfile[f] |
|
1892 | del actionbyfile[f] | |
1875 |
|
1893 | |||
1876 | # Convert to dictionary-of-lists format |
|
1894 | # Convert to dictionary-of-lists format | |
1877 | actions = emptyactions() |
|
1895 | actions = emptyactions() | |
1878 | for f, (m, args, msg) in pycompat.iteritems(actionbyfile): |
|
1896 | for f, (m, args, msg) in pycompat.iteritems(actionbyfile): | |
1879 | if m not in actions: |
|
1897 | if m not in actions: | |
1880 | actions[m] = [] |
|
1898 | actions[m] = [] | |
1881 | actions[m].append((f, args, msg)) |
|
1899 | actions[m].append((f, args, msg)) | |
1882 |
|
1900 | |||
1883 | # ACTION_GET_OTHER_AND_STORE is a mergestatemod.ACTION_GET + store in mergestate |
|
1901 | # ACTION_GET_OTHER_AND_STORE is a mergestatemod.ACTION_GET + store in mergestate | |
1884 | for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]: |
|
1902 | for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]: | |
1885 | actions[mergestatemod.ACTION_GET].append(e) |
|
1903 | actions[mergestatemod.ACTION_GET].append(e) | |
1886 |
|
1904 | |||
1887 | if not util.fscasesensitive(repo.path): |
|
1905 | if not util.fscasesensitive(repo.path): | |
1888 | # check collision between files only in p2 for clean update |
|
1906 | # check collision between files only in p2 for clean update | |
1889 | if not branchmerge and ( |
|
1907 | if not branchmerge and ( | |
1890 | force or not wc.dirty(missing=True, branch=False) |
|
1908 | force or not wc.dirty(missing=True, branch=False) | |
1891 | ): |
|
1909 | ): | |
1892 | _checkcollision(repo, p2.manifest(), None) |
|
1910 | _checkcollision(repo, p2.manifest(), None) | |
1893 | else: |
|
1911 | else: | |
1894 | _checkcollision(repo, wc.manifest(), actions) |
|
1912 | _checkcollision(repo, wc.manifest(), actions) | |
1895 |
|
1913 | |||
1896 | # divergent renames |
|
1914 | # divergent renames | |
1897 | for f, fl in sorted(pycompat.iteritems(mresult.diverge)): |
|
1915 | for f, fl in sorted(pycompat.iteritems(mresult.diverge)): | |
1898 | repo.ui.warn( |
|
1916 | repo.ui.warn( | |
1899 | _( |
|
1917 | _( | |
1900 | b"note: possible conflict - %s was renamed " |
|
1918 | b"note: possible conflict - %s was renamed " | |
1901 | b"multiple times to:\n" |
|
1919 | b"multiple times to:\n" | |
1902 | ) |
|
1920 | ) | |
1903 | % f |
|
1921 | % f | |
1904 | ) |
|
1922 | ) | |
1905 | for nf in sorted(fl): |
|
1923 | for nf in sorted(fl): | |
1906 | repo.ui.warn(b" %s\n" % nf) |
|
1924 | repo.ui.warn(b" %s\n" % nf) | |
1907 |
|
1925 | |||
1908 | # rename and delete |
|
1926 | # rename and delete | |
1909 | for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)): |
|
1927 | for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)): | |
1910 | repo.ui.warn( |
|
1928 | repo.ui.warn( | |
1911 | _( |
|
1929 | _( | |
1912 | b"note: possible conflict - %s was deleted " |
|
1930 | b"note: possible conflict - %s was deleted " | |
1913 | b"and renamed to:\n" |
|
1931 | b"and renamed to:\n" | |
1914 | ) |
|
1932 | ) | |
1915 | % f |
|
1933 | % f | |
1916 | ) |
|
1934 | ) | |
1917 | for nf in sorted(fl): |
|
1935 | for nf in sorted(fl): | |
1918 | repo.ui.warn(b" %s\n" % nf) |
|
1936 | repo.ui.warn(b" %s\n" % nf) | |
1919 |
|
1937 | |||
1920 | ### apply phase |
|
1938 | ### apply phase | |
1921 | if not branchmerge: # just jump to the new rev |
|
1939 | if not branchmerge: # just jump to the new rev | |
1922 | fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b'' |
|
1940 | fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b'' | |
1923 | # If we're doing a partial update, we need to skip updating |
|
1941 | # If we're doing a partial update, we need to skip updating | |
1924 | # the dirstate. |
|
1942 | # the dirstate. | |
1925 | always = matcher is None or matcher.always() |
|
1943 | always = matcher is None or matcher.always() | |
1926 | updatedirstate = updatedirstate and always and not wc.isinmemory() |
|
1944 | updatedirstate = updatedirstate and always and not wc.isinmemory() | |
1927 | if updatedirstate: |
|
1945 | if updatedirstate: | |
1928 | repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2) |
|
1946 | repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2) | |
1929 | # note that we're in the middle of an update |
|
1947 | # note that we're in the middle of an update | |
1930 | repo.vfs.write(b'updatestate', p2.hex()) |
|
1948 | repo.vfs.write(b'updatestate', p2.hex()) | |
1931 |
|
1949 | |||
1932 | _advertisefsmonitor( |
|
1950 | _advertisefsmonitor( | |
1933 | repo, len(actions[mergestatemod.ACTION_GET]), p1.node() |
|
1951 | repo, len(actions[mergestatemod.ACTION_GET]), p1.node() | |
1934 | ) |
|
1952 | ) | |
1935 |
|
1953 | |||
1936 | wantfiledata = updatedirstate and not branchmerge |
|
1954 | wantfiledata = updatedirstate and not branchmerge | |
1937 | stats, getfiledata = applyupdates( |
|
1955 | stats, getfiledata = applyupdates( | |
1938 | repo, actions, wc, p2, overwrite, wantfiledata, labels=labels |
|
1956 | repo, | |
|
1957 | actions, | |||
|
1958 | wc, | |||
|
1959 | p2, | |||
|
1960 | overwrite, | |||
|
1961 | wantfiledata, | |||
|
1962 | labels=labels, | |||
|
1963 | commitinfo=mresult.commitinfo, | |||
1939 | ) |
|
1964 | ) | |
1940 |
|
1965 | |||
1941 | if updatedirstate: |
|
1966 | if updatedirstate: | |
1942 | with repo.dirstate.parentchange(): |
|
1967 | with repo.dirstate.parentchange(): | |
1943 | repo.setparents(fp1, fp2) |
|
1968 | repo.setparents(fp1, fp2) | |
1944 | mergestatemod.recordupdates( |
|
1969 | mergestatemod.recordupdates( | |
1945 | repo, actions, branchmerge, getfiledata |
|
1970 | repo, actions, branchmerge, getfiledata | |
1946 | ) |
|
1971 | ) | |
1947 | # update completed, clear state |
|
1972 | # update completed, clear state | |
1948 | util.unlink(repo.vfs.join(b'updatestate')) |
|
1973 | util.unlink(repo.vfs.join(b'updatestate')) | |
1949 |
|
1974 | |||
1950 | if not branchmerge: |
|
1975 | if not branchmerge: | |
1951 | repo.dirstate.setbranch(p2.branch()) |
|
1976 | repo.dirstate.setbranch(p2.branch()) | |
1952 |
|
1977 | |||
1953 | # If we're updating to a location, clean up any stale temporary includes |
|
1978 | # If we're updating to a location, clean up any stale temporary includes | |
1954 | # (ex: this happens during hg rebase --abort). |
|
1979 | # (ex: this happens during hg rebase --abort). | |
1955 | if not branchmerge: |
|
1980 | if not branchmerge: | |
1956 | sparse.prunetemporaryincludes(repo) |
|
1981 | sparse.prunetemporaryincludes(repo) | |
1957 |
|
1982 | |||
1958 | if updatedirstate: |
|
1983 | if updatedirstate: | |
1959 | repo.hook( |
|
1984 | repo.hook( | |
1960 | b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount |
|
1985 | b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount | |
1961 | ) |
|
1986 | ) | |
1962 | return stats |
|
1987 | return stats | |
1963 |
|
1988 | |||
1964 |
|
1989 | |||
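Note on the action bookkeeping shown above: update() regroups the per-file mapping produced by calculateupdates() ({path: (action, args, msg)}) into a per-action mapping before handing it to applyupdates(). The following is a minimal plain-Python sketch of that regrouping step only; it uses a defaultdict instead of the real emptyactions()/mergestatemod constants, so the names here are illustrative, not the Mercurial API.

    from collections import defaultdict

    def group_by_action(actionbyfile):
        # {path: (action, args, msg)} -> {action: [(path, args, msg), ...]}
        actions = defaultdict(list)
        for path, (action, args, msg) in actionbyfile.items():
            actions[action].append((path, args, msg))
        return actions

    # e.g. group_by_action({b'a': (b'g', (b'', False), b'remote created')})
    # conceptually yields {b'g': [(b'a', (b'', False), b'remote created')]}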
1965 | def merge(ctx, labels=None, force=False, wc=None): |
|
1990 | def merge(ctx, labels=None, force=False, wc=None): | |
1966 | """Merge another topological branch into the working copy. |
|
1991 | """Merge another topological branch into the working copy. | |
1967 |
|
1992 | |||
1968 | force = whether the merge was run with 'merge --force' (deprecated) |
|
1993 | force = whether the merge was run with 'merge --force' (deprecated) | |
1969 | """ |
|
1994 | """ | |
1970 |
|
1995 | |||
1971 | return update( |
|
1996 | return update( | |
1972 | ctx.repo(), |
|
1997 | ctx.repo(), | |
1973 | ctx.rev(), |
|
1998 | ctx.rev(), | |
1974 | labels=labels, |
|
1999 | labels=labels, | |
1975 | branchmerge=True, |
|
2000 | branchmerge=True, | |
1976 | force=force, |
|
2001 | force=force, | |
1977 | mergeforce=force, |
|
2002 | mergeforce=force, | |
1978 | wc=wc, |
|
2003 | wc=wc, | |
1979 | ) |
|
2004 | ) | |
1980 |
|
2005 | |||
1981 |
|
2006 | |||
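As the diff shows, merge() is a thin wrapper that re-enters update() with branchmerge=True. A hedged usage sketch, assuming `repo` is an already-open repository object; the helper name below is made up for illustration and error handling is omitted.

    from mercurial import merge as mergemod

    def merge_with(repo, rev):
        stats = mergemod.merge(repo[rev])     # branchmerge=True internally
        return stats.unresolvedcount == 0     # True when no conflicts remain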
1982 | def clean_update(ctx, wc=None): |
|
2007 | def clean_update(ctx, wc=None): | |
1983 | """Do a clean update to the given commit. |
|
2008 | """Do a clean update to the given commit. | |
1984 |
|
2009 | |||
1985 | This involves updating to the commit and discarding any changes in the |
|
2010 | This involves updating to the commit and discarding any changes in the | |
1986 | working copy. |
|
2011 | working copy. | |
1987 | """ |
|
2012 | """ | |
1988 | return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc) |
|
2013 | return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc) | |
1989 |
|
2014 | |||
1990 |
|
2015 | |||
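clean_update() is the programmatic counterpart of a forced, clean checkout: it moves the working copy to the target revision and discards local modifications. A small sketch, again assuming an open `repo` and a hypothetical helper name:

    from mercurial import merge as mergemod

    def force_checkout(repo, rev):
        # discards uncommitted changes, similar to 'hg update --clean -r REV'
        mergemod.clean_update(repo[rev])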
1991 | def revert_to(ctx, matcher=None, wc=None): |
|
2016 | def revert_to(ctx, matcher=None, wc=None): | |
1992 | """Revert the working copy to the given commit. |
|
2017 | """Revert the working copy to the given commit. | |
1993 |
|
2018 | |||
1994 | The working copy will keep its current parent(s) but its content will |
|
2019 | The working copy will keep its current parent(s) but its content will | |
1995 | be the same as in the given commit. |
|
2020 | be the same as in the given commit. | |
1996 | """ |
|
2021 | """ | |
1997 |
|
2022 | |||
1998 | return update( |
|
2023 | return update( | |
1999 | ctx.repo(), |
|
2024 | ctx.repo(), | |
2000 | ctx.rev(), |
|
2025 | ctx.rev(), | |
2001 | branchmerge=False, |
|
2026 | branchmerge=False, | |
2002 | force=True, |
|
2027 | force=True, | |
2003 | updatedirstate=False, |
|
2028 | updatedirstate=False, | |
2004 | matcher=matcher, |
|
2029 | matcher=matcher, | |
2005 | wc=wc, |
|
2030 | wc=wc, | |
2006 | ) |
|
2031 | ) | |
2007 |
|
2032 | |||
2008 |
|
2033 | |||
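revert_to() differs from clean_update() in that the dirstate parents stay where they are; only the file contents are rewritten to match the given commit (note the updatedirstate=False in its update() call above). It is roughly comparable in effect to reverting all files to a revision. A sketch with the matcher left at its default so every file is affected; the helper name is hypothetical:

    from mercurial import merge as mergemod

    def revert_everything(repo, rev):
        # working copy keeps its parents, file contents come from repo[rev]
        mergemod.revert_to(repo[rev])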
2009 | def graft( |
|
2034 | def graft( | |
2010 | repo, |
|
2035 | repo, | |
2011 | ctx, |
|
2036 | ctx, | |
2012 | base=None, |
|
2037 | base=None, | |
2013 | labels=None, |
|
2038 | labels=None, | |
2014 | keepparent=False, |
|
2039 | keepparent=False, | |
2015 | keepconflictparent=False, |
|
2040 | keepconflictparent=False, | |
2016 | wctx=None, |
|
2041 | wctx=None, | |
2017 | ): |
|
2042 | ): | |
2018 | """Do a graft-like merge. |
|
2043 | """Do a graft-like merge. | |
2019 |
|
2044 | |||
2020 | This is a merge where the merge ancestor is chosen such that one |
|
2045 | This is a merge where the merge ancestor is chosen such that one | |
2021 | or more changesets are grafted onto the current changeset. In |
|
2046 | or more changesets are grafted onto the current changeset. In | |
2022 | addition to the merge, this fixes up the dirstate to include only |
|
2047 | addition to the merge, this fixes up the dirstate to include only | |
2023 | a single parent (if keepparent is False) and tries to duplicate any |
|
2048 | a single parent (if keepparent is False) and tries to duplicate any | |
2024 | renames/copies appropriately. |
|
2049 | renames/copies appropriately. | |
2025 |
|
2050 | |||
2026 | ctx - changeset to rebase |
|
2051 | ctx - changeset to rebase | |
2027 | base - merge base, or ctx.p1() if not specified |
|
2052 | base - merge base, or ctx.p1() if not specified | |
2028 | labels - merge labels eg ['local', 'graft'] |
|
2053 | labels - merge labels eg ['local', 'graft'] | |
2029 | keepparent - keep second parent if any |
|
2054 | keepparent - keep second parent if any | |
2030 | keepconflictparent - if unresolved, keep parent used for the merge |
|
2055 | keepconflictparent - if unresolved, keep parent used for the merge | |
2031 |
|
2056 | |||
2032 | """ |
|
2057 | """ | |
2033 | # If we're grafting a descendant onto an ancestor, be sure to pass |
|
2058 | # If we're grafting a descendant onto an ancestor, be sure to pass | |
2034 | # mergeancestor=True to update. This does two things: 1) allows the merge if |
|
2059 | # mergeancestor=True to update. This does two things: 1) allows the merge if | |
2035 | # the destination is the same as the parent of the ctx (so we can use graft |
|
2060 | # the destination is the same as the parent of the ctx (so we can use graft | |
2036 | # to copy commits), and 2) informs update that the incoming changes are |
|
2061 | # to copy commits), and 2) informs update that the incoming changes are | |
2037 | # newer than the destination so it doesn't prompt about "remote changed foo |
|
2062 | # newer than the destination so it doesn't prompt about "remote changed foo | |
2038 | # which local deleted". |
|
2063 | # which local deleted". | |
2039 | # We also pass mergeancestor=True when base is the same revision as p1. 2) |
|
2064 | # We also pass mergeancestor=True when base is the same revision as p1. 2) | |
2040 | # doesn't matter as there can't possibly be conflicts, but 1) is necessary. |
|
2065 | # doesn't matter as there can't possibly be conflicts, but 1) is necessary. | |
2041 | wctx = wctx or repo[None] |
|
2066 | wctx = wctx or repo[None] | |
2042 | pctx = wctx.p1() |
|
2067 | pctx = wctx.p1() | |
2043 | base = base or ctx.p1() |
|
2068 | base = base or ctx.p1() | |
2044 | mergeancestor = ( |
|
2069 | mergeancestor = ( | |
2045 | repo.changelog.isancestor(pctx.node(), ctx.node()) |
|
2070 | repo.changelog.isancestor(pctx.node(), ctx.node()) | |
2046 | or pctx.rev() == base.rev() |
|
2071 | or pctx.rev() == base.rev() | |
2047 | ) |
|
2072 | ) | |
2048 |
|
2073 | |||
2049 | stats = update( |
|
2074 | stats = update( | |
2050 | repo, |
|
2075 | repo, | |
2051 | ctx.node(), |
|
2076 | ctx.node(), | |
2052 | True, |
|
2077 | True, | |
2053 | True, |
|
2078 | True, | |
2054 | base.node(), |
|
2079 | base.node(), | |
2055 | mergeancestor=mergeancestor, |
|
2080 | mergeancestor=mergeancestor, | |
2056 | labels=labels, |
|
2081 | labels=labels, | |
2057 | wc=wctx, |
|
2082 | wc=wctx, | |
2058 | ) |
|
2083 | ) | |
2059 |
|
2084 | |||
2060 | if keepconflictparent and stats.unresolvedcount: |
|
2085 | if keepconflictparent and stats.unresolvedcount: | |
2061 | pother = ctx.node() |
|
2086 | pother = ctx.node() | |
2062 | else: |
|
2087 | else: | |
2063 | pother = nullid |
|
2088 | pother = nullid | |
2064 | parents = ctx.parents() |
|
2089 | parents = ctx.parents() | |
2065 | if keepparent and len(parents) == 2 and base in parents: |
|
2090 | if keepparent and len(parents) == 2 and base in parents: | |
2066 | parents.remove(base) |
|
2091 | parents.remove(base) | |
2067 | pother = parents[0].node() |
|
2092 | pother = parents[0].node() | |
2068 | # Never set both parents equal to each other |
|
2093 | # Never set both parents equal to each other | |
2069 | if pother == pctx.node(): |
|
2094 | if pother == pctx.node(): | |
2070 | pother = nullid |
|
2095 | pother = nullid | |
2071 |
|
2096 | |||
2072 | if wctx.isinmemory(): |
|
2097 | if wctx.isinmemory(): | |
2073 | wctx.setparents(pctx.node(), pother) |
|
2098 | wctx.setparents(pctx.node(), pother) | |
2074 | # fix up dirstate for copies and renames |
|
2099 | # fix up dirstate for copies and renames | |
2075 | copies.graftcopies(wctx, ctx, base) |
|
2100 | copies.graftcopies(wctx, ctx, base) | |
2076 | else: |
|
2101 | else: | |
2077 | with repo.dirstate.parentchange(): |
|
2102 | with repo.dirstate.parentchange(): | |
2078 | repo.setparents(pctx.node(), pother) |
|
2103 | repo.setparents(pctx.node(), pother) | |
2079 | repo.dirstate.write(repo.currenttransaction()) |
|
2104 | repo.dirstate.write(repo.currenttransaction()) | |
2080 | # fix up dirstate for copies and renames |
|
2105 | # fix up dirstate for copies and renames | |
2081 | copies.graftcopies(wctx, ctx, base) |
|
2106 | copies.graftcopies(wctx, ctx, base) | |
2082 | return stats |
|
2107 | return stats | |
2083 |
|
2108 | |||
2084 |
|
2109 | |||
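graft() ties the pieces together for cherry-pick style operations: it merges `ctx` using `base` (default ctx.p1()) as the ancestor, then fixes up the parents and copy metadata so the result looks like a single-parent change. A hedged caller sketch; `graft_one` is a hypothetical helper, the label values follow the docstring's example, and error handling is omitted:

    from mercurial import merge as mergemod

    def graft_one(repo, rev):
        ctx = repo[rev]
        stats = mergemod.graft(repo, ctx, labels=[b'local', b'graft'])
        return stats.unresolvedcount == 0   # False means conflicts still need resolving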
2085 | def purge( |
|
2110 | def purge( | |
2086 | repo, |
|
2111 | repo, | |
2087 | matcher, |
|
2112 | matcher, | |
2088 | unknown=True, |
|
2113 | unknown=True, | |
2089 | ignored=False, |
|
2114 | ignored=False, | |
2090 | removeemptydirs=True, |
|
2115 | removeemptydirs=True, | |
2091 | removefiles=True, |
|
2116 | removefiles=True, | |
2092 | abortonerror=False, |
|
2117 | abortonerror=False, | |
2093 | noop=False, |
|
2118 | noop=False, | |
2094 | ): |
|
2119 | ): | |
2095 | """Purge the working directory of untracked files. |
|
2120 | """Purge the working directory of untracked files. | |
2096 |
|
2121 | |||
2097 | ``matcher`` is a matcher configured to scan the working directory - |
|
2122 | ``matcher`` is a matcher configured to scan the working directory - | |
2098 | potentially a subset. |
|
2123 | potentially a subset. | |
2099 |
|
2124 | |||
2100 | ``unknown`` controls whether unknown files should be purged. |
|
2125 | ``unknown`` controls whether unknown files should be purged. | |
2101 |
|
2126 | |||
2102 | ``ignored`` controls whether ignored files should be purged. |
|
2127 | ``ignored`` controls whether ignored files should be purged. | |
2103 |
|
2128 | |||
2104 | ``removeemptydirs`` controls whether empty directories should be removed. |
|
2129 | ``removeemptydirs`` controls whether empty directories should be removed. | |
2105 |
|
2130 | |||
2106 | ``removefiles`` controls whether files are removed. |
|
2131 | ``removefiles`` controls whether files are removed. | |
2107 |
|
2132 | |||
2108 | ``abortonerror`` causes an exception to be raised if an error occurs |
|
2133 | ``abortonerror`` causes an exception to be raised if an error occurs | |
2109 | deleting a file or directory. |
|
2134 | deleting a file or directory. | |
2110 |
|
2135 | |||
2111 | ``noop`` controls whether to actually remove files. If not defined, actions |
|
2136 | ``noop`` controls whether to actually remove files. If not defined, actions | |
2112 | will be taken. |
|
2137 | will be taken. | |
2113 |
|
2138 | |||
2114 | Returns an iterable of relative paths in the working directory that were |
|
2139 | Returns an iterable of relative paths in the working directory that were | |
2115 | or would be removed. |
|
2140 | or would be removed. | |
2116 | """ |
|
2141 | """ | |
2117 |
|
2142 | |||
2118 | def remove(removefn, path): |
|
2143 | def remove(removefn, path): | |
2119 | try: |
|
2144 | try: | |
2120 | removefn(path) |
|
2145 | removefn(path) | |
2121 | except OSError: |
|
2146 | except OSError: | |
2122 | m = _(b'%s cannot be removed') % path |
|
2147 | m = _(b'%s cannot be removed') % path | |
2123 | if abortonerror: |
|
2148 | if abortonerror: | |
2124 | raise error.Abort(m) |
|
2149 | raise error.Abort(m) | |
2125 | else: |
|
2150 | else: | |
2126 | repo.ui.warn(_(b'warning: %s\n') % m) |
|
2151 | repo.ui.warn(_(b'warning: %s\n') % m) | |
2127 |
|
2152 | |||
2128 | # There's no API to copy a matcher. So mutate the passed matcher and |
|
2153 | # There's no API to copy a matcher. So mutate the passed matcher and | |
2129 | # restore it when we're done. |
|
2154 | # restore it when we're done. | |
2130 | oldtraversedir = matcher.traversedir |
|
2155 | oldtraversedir = matcher.traversedir | |
2131 |
|
2156 | |||
2132 | res = [] |
|
2157 | res = [] | |
2133 |
|
2158 | |||
2134 | try: |
|
2159 | try: | |
2135 | if removeemptydirs: |
|
2160 | if removeemptydirs: | |
2136 | directories = [] |
|
2161 | directories = [] | |
2137 | matcher.traversedir = directories.append |
|
2162 | matcher.traversedir = directories.append | |
2138 |
|
2163 | |||
2139 | status = repo.status(match=matcher, ignored=ignored, unknown=unknown) |
|
2164 | status = repo.status(match=matcher, ignored=ignored, unknown=unknown) | |
2140 |
|
2165 | |||
2141 | if removefiles: |
|
2166 | if removefiles: | |
2142 | for f in sorted(status.unknown + status.ignored): |
|
2167 | for f in sorted(status.unknown + status.ignored): | |
2143 | if not noop: |
|
2168 | if not noop: | |
2144 | repo.ui.note(_(b'removing file %s\n') % f) |
|
2169 | repo.ui.note(_(b'removing file %s\n') % f) | |
2145 | remove(repo.wvfs.unlink, f) |
|
2170 | remove(repo.wvfs.unlink, f) | |
2146 | res.append(f) |
|
2171 | res.append(f) | |
2147 |
|
2172 | |||
2148 | if removeemptydirs: |
|
2173 | if removeemptydirs: | |
2149 | for f in sorted(directories, reverse=True): |
|
2174 | for f in sorted(directories, reverse=True): | |
2150 | if matcher(f) and not repo.wvfs.listdir(f): |
|
2175 | if matcher(f) and not repo.wvfs.listdir(f): | |
2151 | if not noop: |
|
2176 | if not noop: | |
2152 | repo.ui.note(_(b'removing directory %s\n') % f) |
|
2177 | repo.ui.note(_(b'removing directory %s\n') % f) | |
2153 | remove(repo.wvfs.rmdir, f) |
|
2178 | remove(repo.wvfs.rmdir, f) | |
2154 | res.append(f) |
|
2179 | res.append(f) | |
2155 |
|
2180 | |||
2156 | return res |
|
2181 | return res | |
2157 |
|
2182 | |||
2158 | finally: |
|
2183 | finally: | |
2159 | matcher.traversedir = oldtraversedir |
|
2184 | matcher.traversedir = oldtraversedir |
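purge() backs the removal of untracked (and optionally ignored) files; with noop=True it only reports what it would delete, which makes a dry run cheap since the function returns the paths that were or would be removed. A usage sketch, assuming an open `repo`; building the everything-matcher via scmutil.match(repo[None]) is an assumption about a convenient way to get one, not something this diff prescribes:

    from mercurial import merge as mergemod, scmutil

    def preview_purge(repo):
        m = scmutil.match(repo[None])   # match the whole working directory
        return mergemod.purge(repo, m, unknown=True, ignored=False, noop=True)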