@@ -1,1186 +1,1186 b'' | |||||
1 | # Infinite push |
|
1 | # Infinite push | |
2 | # |
|
2 | # | |
3 | # Copyright 2016 Facebook, Inc. |
|
3 | # Copyright 2016 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | """ store some pushes in a remote blob store on the server (EXPERIMENTAL) |
|
7 | """ store some pushes in a remote blob store on the server (EXPERIMENTAL) | |
8 |
|
8 | |||
9 | [infinitepush] |
|
9 | [infinitepush] | |
10 | # Server-side and client-side option. Pattern of the infinitepush bookmark |
|
10 | # Server-side and client-side option. Pattern of the infinitepush bookmark | |
11 | branchpattern = PATTERN |
|
11 | branchpattern = PATTERN | |
12 |
|
12 | |||
13 | # Server or client |
|
13 | # Server or client | |
14 | server = False |
|
14 | server = False | |
15 |
|
15 | |||
16 | # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set |
|
16 | # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set | |
17 | indextype = disk |
|
17 | indextype = disk | |
18 |
|
18 | |||
19 | # Server-side option. Used only if indextype=sql. |
|
19 | # Server-side option. Used only if indextype=sql. | |
20 | # Format: 'IP:PORT:DB_NAME:USER:PASSWORD' |
|
20 | # Format: 'IP:PORT:DB_NAME:USER:PASSWORD' | |
21 | sqlhost = IP:PORT:DB_NAME:USER:PASSWORD |
|
21 | sqlhost = IP:PORT:DB_NAME:USER:PASSWORD | |
22 |
|
22 | |||
23 | # Server-side option. Used only if indextype=disk. |
|
23 | # Server-side option. Used only if indextype=disk. | |
24 | # Filesystem path to the index store |
|
24 | # Filesystem path to the index store | |
25 | indexpath = PATH |
|
25 | indexpath = PATH | |
26 |
|
26 | |||
27 | # Server-side option. Possible values: 'disk' or 'external' |
|
27 | # Server-side option. Possible values: 'disk' or 'external' | |
28 | # Fails if not set |
|
28 | # Fails if not set | |
29 | storetype = disk |
|
29 | storetype = disk | |
30 |
|
30 | |||
31 | # Server-side option. |
|
31 | # Server-side option. | |
32 | # Path to the binary that will save bundle to the bundlestore |
|
32 | # Path to the binary that will save bundle to the bundlestore | |
33 | # Formatted cmd line will be passed to it (see `put_args`) |
|
33 | # Formatted cmd line will be passed to it (see `put_args`) | |
34 | put_binary = put |
|
34 | put_binary = put | |
35 |
|
35 | |||
36 | # Server-side option. Used only if storetype=external. |

36 | # Server-side option. Used only if storetype=external. | |
37 | # Format cmd-line string for put binary. Placeholder: {filename} |
|
37 | # Format cmd-line string for put binary. Placeholder: {filename} | |
38 | put_args = {filename} |
|
38 | put_args = {filename} | |
39 |
|
39 | |||
40 | # Server-side option. |
|
40 | # Server-side option. | |
41 | # Path to the binary that gets a bundle from the bundlestore. |

41 | # Path to the binary that gets a bundle from the bundlestore. | |
42 | # Formatted cmd line will be passed to it (see `get_args`) |
|
42 | # Formatted cmd line will be passed to it (see `get_args`) | |
43 | get_binary = get |
|
43 | get_binary = get | |
44 |
|
44 | |||
45 | # Server-side option. Used only if storetype=external. |

45 | # Server-side option. Used only if storetype=external. | |
46 | # Format cmd-line string for get binary. Placeholders: {filename} {handle} |
|
46 | # Format cmd-line string for get binary. Placeholders: {filename} {handle} | |
47 | get_args = {filename} {handle} |
|
47 | get_args = {filename} {handle} | |
48 |
|
48 | |||
49 | # Server-side option |
|
49 | # Server-side option | |
50 | logfile = FILE |

50 | logfile = FILE | |
51 |
|
51 | |||
52 | # Server-side option |
|
52 | # Server-side option | |
53 | loglevel = DEBUG |
|
53 | loglevel = DEBUG | |
54 |
|
54 | |||
55 | # Server-side option. Used only if indextype=sql. |
|
55 | # Server-side option. Used only if indextype=sql. | |
56 | # Sets mysql wait_timeout option. |
|
56 | # Sets mysql wait_timeout option. | |
57 | waittimeout = 300 |
|
57 | waittimeout = 300 | |
58 |
|
58 | |||
59 | # Server-side option. Used only if indextype=sql. |
|
59 | # Server-side option. Used only if indextype=sql. | |
60 | # Sets mysql innodb_lock_wait_timeout option. |
|
60 | # Sets mysql innodb_lock_wait_timeout option. | |
61 | locktimeout = 120 |
|
61 | locktimeout = 120 | |
62 |
|
62 | |||
63 | # Server-side option. Used only if indextype=sql. |
|
63 | # Server-side option. Used only if indextype=sql. | |
64 | # Name of the repository |
|
64 | # Name of the repository | |
65 | reponame = '' |
|
65 | reponame = '' | |
66 |
|
66 | |||
67 | # Client-side option. Used by --list-remote option. List of remote scratch |
|
67 | # Client-side option. Used by --list-remote option. List of remote scratch | |
68 | # patterns to list if no patterns are specified. |
|
68 | # patterns to list if no patterns are specified. | |
69 | defaultremotepatterns = ['*'] |
|
69 | defaultremotepatterns = ['*'] | |
70 |
|
70 | |||
71 | # Instructs infinitepush to forward all received bundle2 parts to the |
|
71 | # Instructs infinitepush to forward all received bundle2 parts to the | |
72 | # bundle for storage. Defaults to False. |
|
72 | # bundle for storage. Defaults to False. | |
73 | storeallparts = True |
|
73 | storeallparts = True | |
74 |
|
74 | |||
75 | # routes each incoming push to the bundlestore. defaults to False |
|
75 | # routes each incoming push to the bundlestore. defaults to False | |
76 | pushtobundlestore = True |
|
76 | pushtobundlestore = True | |
77 |
|
77 | |||
78 | [remotenames] |
|
78 | [remotenames] | |
79 | # Client-side option |
|
79 | # Client-side option | |
80 | # This option should be set only if remotenames extension is enabled. |
|
80 | # This option should be set only if remotenames extension is enabled. | |
81 | # Whether remote bookmarks are tracked by remotenames extension. |
|
81 | # Whether remote bookmarks are tracked by remotenames extension. | |
82 | bookmarks = True |
|
82 | bookmarks = True | |
83 | """ |
|
83 | """ | |
84 |
|
84 | |||
85 | from __future__ import absolute_import |
|
85 | from __future__ import absolute_import | |
86 |
|
86 | |||
87 | import collections |
|
87 | import collections | |
88 | import contextlib |
|
88 | import contextlib | |
89 | import errno |
|
89 | import errno | |
90 | import functools |
|
90 | import functools | |
91 | import logging |
|
91 | import logging | |
92 | import os |
|
92 | import os | |
93 | import random |
|
93 | import random | |
94 | import re |
|
94 | import re | |
95 | import socket |
|
95 | import socket | |
96 | import subprocess |
|
96 | import subprocess | |
97 | import tempfile |
|
97 | import tempfile | |
98 | import time |
|
98 | import time | |
99 |
|
99 | |||
100 | from mercurial.node import ( |
|
100 | from mercurial.node import ( | |
101 | bin, |
|
101 | bin, | |
102 | hex, |
|
102 | hex, | |
103 | ) |
|
103 | ) | |
104 |
|
104 | |||
105 | from mercurial.i18n import _ |
|
105 | from mercurial.i18n import _ | |
106 |
|
106 | |||
107 | from mercurial.utils import ( |
|
107 | from mercurial.utils import ( | |
108 | procutil, |
|
108 | procutil, | |
109 | stringutil, |
|
109 | stringutil, | |
110 | ) |
|
110 | ) | |
111 |
|
111 | |||
112 | from mercurial import ( |
|
112 | from mercurial import ( | |
113 | bundle2, |
|
113 | bundle2, | |
114 | changegroup, |
|
114 | changegroup, | |
115 | commands, |
|
115 | commands, | |
116 | discovery, |
|
116 | discovery, | |
117 | encoding, |
|
117 | encoding, | |
118 | error, |
|
118 | error, | |
119 | exchange, |
|
119 | exchange, | |
120 | extensions, |
|
120 | extensions, | |
121 | hg, |
|
121 | hg, | |
122 | localrepo, |
|
122 | localrepo, | |
123 | peer, |
|
123 | peer, | |
124 | phases, |
|
124 | phases, | |
125 | pushkey, |
|
125 | pushkey, | |
126 | pycompat, |
|
126 | pycompat, | |
127 | registrar, |
|
127 | registrar, | |
128 | util, |
|
128 | util, | |
129 | wireproto, |
|
129 | wireproto, | |
130 | ) |
|
130 | ) | |
131 |
|
131 | |||
132 | from . import ( |
|
132 | from . import ( | |
133 | bundleparts, |
|
133 | bundleparts, | |
134 | common, |
|
134 | common, | |
135 | ) |
|
135 | ) | |
136 |
|
136 | |||
137 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
137 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
138 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
138 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
139 | # be specifying the version(s) of Mercurial they are tested with, or |
|
139 | # be specifying the version(s) of Mercurial they are tested with, or | |
140 | # leave the attribute unspecified. |
|
140 | # leave the attribute unspecified. | |
141 | testedwith = 'ships-with-hg-core' |
|
141 | testedwith = 'ships-with-hg-core' | |
142 |
|
142 | |||
143 | configtable = {} |
|
143 | configtable = {} | |
144 | configitem = registrar.configitem(configtable) |
|
144 | configitem = registrar.configitem(configtable) | |
145 |
|
145 | |||
146 | configitem('infinitepush', 'server', |
|
146 | configitem('infinitepush', 'server', | |
147 | default=False, |
|
147 | default=False, | |
148 | ) |
|
148 | ) | |
149 | configitem('infinitepush', 'storetype', |
|
149 | configitem('infinitepush', 'storetype', | |
150 | default='', |
|
150 | default='', | |
151 | ) |
|
151 | ) | |
152 | configitem('infinitepush', 'indextype', |
|
152 | configitem('infinitepush', 'indextype', | |
153 | default='', |
|
153 | default='', | |
154 | ) |
|
154 | ) | |
155 | configitem('infinitepush', 'indexpath', |
|
155 | configitem('infinitepush', 'indexpath', | |
156 | default='', |
|
156 | default='', | |
157 | ) |
|
157 | ) | |
158 | configitem('infinitepush', 'storeallparts', |
|
158 | configitem('infinitepush', 'storeallparts', | |
159 | default=False, |
|
159 | default=False, | |
160 | ) |
|
160 | ) | |
161 | configitem('infinitepush', 'reponame', |
|
161 | configitem('infinitepush', 'reponame', | |
162 | default='', |
|
162 | default='', | |
163 | ) |
|
163 | ) | |
164 | configitem('scratchbranch', 'storepath', |
|
164 | configitem('scratchbranch', 'storepath', | |
165 | default='', |
|
165 | default='', | |
166 | ) |
|
166 | ) | |
167 | configitem('infinitepush', 'branchpattern', |
|
167 | configitem('infinitepush', 'branchpattern', | |
168 | default='', |
|
168 | default='', | |
169 | ) |
|
169 | ) | |
170 | configitem('infinitepush', 'pushtobundlestore', |
|
170 | configitem('infinitepush', 'pushtobundlestore', | |
171 | default=False, |
|
171 | default=False, | |
172 | ) |
|
172 | ) | |
173 | configitem('experimental', 'server-bundlestore-bookmark', |
|
173 | configitem('experimental', 'server-bundlestore-bookmark', | |
174 | default='', |
|
174 | default='', | |
175 | ) |
|
175 | ) | |
176 | configitem('experimental', 'infinitepush-scratchpush', |
|
176 | configitem('experimental', 'infinitepush-scratchpush', | |
177 | default=False, |
|
177 | default=False, | |
178 | ) |
|
178 | ) | |
179 |
|
179 | |||
180 | experimental = 'experimental' |
|
180 | experimental = 'experimental' | |
181 | configbookmark = 'server-bundlestore-bookmark' |
|
181 | configbookmark = 'server-bundlestore-bookmark' | |
182 | configscratchpush = 'infinitepush-scratchpush' |
|
182 | configscratchpush = 'infinitepush-scratchpush' | |
183 |
|
183 | |||
184 | scratchbranchparttype = bundleparts.scratchbranchparttype |
|
184 | scratchbranchparttype = bundleparts.scratchbranchparttype | |
185 | revsetpredicate = registrar.revsetpredicate() |
|
185 | revsetpredicate = registrar.revsetpredicate() | |
186 | templatekeyword = registrar.templatekeyword() |
|
186 | templatekeyword = registrar.templatekeyword() | |
187 | _scratchbranchmatcher = lambda x: False |
|
187 | _scratchbranchmatcher = lambda x: False | |
188 | _maybehash = re.compile(r'^[a-f0-9]+$').search |
|
188 | _maybehash = re.compile(r'^[a-f0-9]+$').search | |
189 |
|
189 | |||
190 | def _buildexternalbundlestore(ui): |
|
190 | def _buildexternalbundlestore(ui): | |
191 | put_args = ui.configlist('infinitepush', 'put_args', []) |
|
191 | put_args = ui.configlist('infinitepush', 'put_args', []) | |
192 | put_binary = ui.config('infinitepush', 'put_binary') |
|
192 | put_binary = ui.config('infinitepush', 'put_binary') | |
193 | if not put_binary: |
|
193 | if not put_binary: | |
194 | raise error.Abort('put binary is not specified') |
|
194 | raise error.Abort('put binary is not specified') | |
195 | get_args = ui.configlist('infinitepush', 'get_args', []) |
|
195 | get_args = ui.configlist('infinitepush', 'get_args', []) | |
196 | get_binary = ui.config('infinitepush', 'get_binary') |
|
196 | get_binary = ui.config('infinitepush', 'get_binary') | |
197 | if not get_binary: |
|
197 | if not get_binary: | |
198 | raise error.Abort('get binary is not specified') |
|
198 | raise error.Abort('get binary is not specified') | |
199 | from . import store |
|
199 | from . import store | |
200 | return store.externalbundlestore(put_binary, put_args, get_binary, get_args) |
|
200 | return store.externalbundlestore(put_binary, put_args, get_binary, get_args) | |
201 |
|
201 | |||
202 | def _buildsqlindex(ui): |
|
202 | def _buildsqlindex(ui): | |
203 | sqlhost = ui.config('infinitepush', 'sqlhost') |
|
203 | sqlhost = ui.config('infinitepush', 'sqlhost') | |
204 | if not sqlhost: |
|
204 | if not sqlhost: | |
205 | raise error.Abort(_('please set infinitepush.sqlhost')) |
|
205 | raise error.Abort(_('please set infinitepush.sqlhost')) | |
206 | host, port, db, user, password = sqlhost.split(':') |
|
206 | host, port, db, user, password = sqlhost.split(':') | |
207 | reponame = ui.config('infinitepush', 'reponame') |
|
207 | reponame = ui.config('infinitepush', 'reponame') | |
208 | if not reponame: |
|
208 | if not reponame: | |
209 | raise error.Abort(_('please set infinitepush.reponame')) |
|
209 | raise error.Abort(_('please set infinitepush.reponame')) | |
210 |
|
210 | |||
211 | logfile = ui.config('infinitepush', 'logfile', '') |
|
211 | logfile = ui.config('infinitepush', 'logfile', '') | |
212 | waittimeout = ui.configint('infinitepush', 'waittimeout', 300) |
|
212 | waittimeout = ui.configint('infinitepush', 'waittimeout', 300) | |
213 | locktimeout = ui.configint('infinitepush', 'locktimeout', 120) |
|
213 | locktimeout = ui.configint('infinitepush', 'locktimeout', 120) | |
214 | from . import sqlindexapi |
|
214 | from . import sqlindexapi | |
215 | return sqlindexapi.sqlindexapi( |
|
215 | return sqlindexapi.sqlindexapi( | |
216 | reponame, host, port, db, user, password, |
|
216 | reponame, host, port, db, user, password, | |
217 | logfile, _getloglevel(ui), waittimeout=waittimeout, |
|
217 | logfile, _getloglevel(ui), waittimeout=waittimeout, | |
218 | locktimeout=locktimeout) |
|
218 | locktimeout=locktimeout) | |
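As documented in the module docstring, sqlhost packs the connection parameters into a single colon-separated string, which _buildsqlindex splits into exactly five fields. A sketch with a made-up value:

    # illustrative only; the value below is not a real server
    sqlhost = '127.0.0.1:3306:infinitepush:hguser:secret'
    host, port, db, user, password = sqlhost.split(':')

Note that this format implies none of the individual fields may themselves contain a colon.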
219 |
|
219 | |||
220 | def _getloglevel(ui): |
|
220 | def _getloglevel(ui): | |
221 | loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG') |
|
221 | loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG') | |
222 | numeric_loglevel = getattr(logging, loglevel.upper(), None) |
|
222 | numeric_loglevel = getattr(logging, loglevel.upper(), None) | |
223 | if not isinstance(numeric_loglevel, int): |
|
223 | if not isinstance(numeric_loglevel, int): | |
224 | raise error.Abort(_('invalid log level %s') % loglevel) |
|
224 | raise error.Abort(_('invalid log level %s') % loglevel) | |
225 | return numeric_loglevel |
|
225 | return numeric_loglevel | |
226 |
|
226 | |||
227 | def _tryhoist(ui, remotebookmark): |
|
227 | def _tryhoist(ui, remotebookmark): | |
228 | '''returns a bookmark with the hoisted part removed |

228 | '''returns a bookmark with the hoisted part removed | |
229 |
|
229 | |||
230 | The remotenames extension has a 'hoist' config that allows using remote |

230 | The remotenames extension has a 'hoist' config that allows using remote | |
231 | bookmarks without specifying the remote path. For example, 'hg update master' |

231 | bookmarks without specifying the remote path. For example, 'hg update master' | |
232 | works as well as 'hg update remote/master'. We want to allow the same in |
|
232 | works as well as 'hg update remote/master'. We want to allow the same in | |
233 | infinitepush. |
|
233 | infinitepush. | |
234 | ''' |
|
234 | ''' | |
235 |
|
235 | |||
236 | if common.isremotebooksenabled(ui): |
|
236 | if common.isremotebooksenabled(ui): | |
237 | hoist = ui.config('remotenames', 'hoistedpeer') + '/' |
|
237 | hoist = ui.config('remotenames', 'hoistedpeer') + '/' | |
238 | if remotebookmark.startswith(hoist): |
|
238 | if remotebookmark.startswith(hoist): | |
239 | return remotebookmark[len(hoist):] |
|
239 | return remotebookmark[len(hoist):] | |
240 | return remotebookmark |
|
240 | return remotebookmark | |
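A concrete illustration of the hoisting behavior above, assuming the remotenames extension is enabled, remotenames.hoistedpeer is set to 'default', and the bookmark names are made up:

    _tryhoist(ui, 'default/scratch/mytask')  # -> 'scratch/mytask'
    _tryhoist(ui, 'scratch/mytask')          # -> 'scratch/mytask' (no hoist prefix, returned unchanged)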
241 |
|
241 | |||
242 | class bundlestore(object): |
|
242 | class bundlestore(object): | |
243 | def __init__(self, repo): |
|
243 | def __init__(self, repo): | |
244 | self._repo = repo |
|
244 | self._repo = repo | |
245 | storetype = self._repo.ui.config('infinitepush', 'storetype') |
|
245 | storetype = self._repo.ui.config('infinitepush', 'storetype') | |
246 | if storetype == 'disk': |
|
246 | if storetype == 'disk': | |
247 | from . import store |
|
247 | from . import store | |
248 | self.store = store.filebundlestore(self._repo.ui, self._repo) |
|
248 | self.store = store.filebundlestore(self._repo.ui, self._repo) | |
249 | elif storetype == 'external': |
|
249 | elif storetype == 'external': | |
250 | self.store = _buildexternalbundlestore(self._repo.ui) |
|
250 | self.store = _buildexternalbundlestore(self._repo.ui) | |
251 | else: |
|
251 | else: | |
252 | raise error.Abort( |
|
252 | raise error.Abort( | |
253 | _('unknown infinitepush store type specified %s') % storetype) |
|
253 | _('unknown infinitepush store type specified %s') % storetype) | |
254 |
|
254 | |||
255 | indextype = self._repo.ui.config('infinitepush', 'indextype') |
|
255 | indextype = self._repo.ui.config('infinitepush', 'indextype') | |
256 | if indextype == 'disk': |
|
256 | if indextype == 'disk': | |
257 | from . import fileindexapi |
|
257 | from . import fileindexapi | |
258 | self.index = fileindexapi.fileindexapi(self._repo) |
|
258 | self.index = fileindexapi.fileindexapi(self._repo) | |
259 | elif indextype == 'sql': |
|
259 | elif indextype == 'sql': | |
260 | self.index = _buildsqlindex(self._repo.ui) |
|
260 | self.index = _buildsqlindex(self._repo.ui) | |
261 | else: |
|
261 | else: | |
262 | raise error.Abort( |
|
262 | raise error.Abort( | |
263 | _('unknown infinitepush index type specified %s') % indextype) |
|
263 | _('unknown infinitepush index type specified %s') % indextype) | |
264 |
|
264 | |||
265 | def _isserver(ui): |
|
265 | def _isserver(ui): | |
266 | return ui.configbool('infinitepush', 'server') |
|
266 | return ui.configbool('infinitepush', 'server') | |
267 |
|
267 | |||
268 | def reposetup(ui, repo): |
|
268 | def reposetup(ui, repo): | |
269 | if _isserver(ui) and repo.local(): |
|
269 | if _isserver(ui) and repo.local(): | |
270 | repo.bundlestore = bundlestore(repo) |
|
270 | repo.bundlestore = bundlestore(repo) | |
271 |
|
271 | |||
272 | def extsetup(ui): |
|
272 | def extsetup(ui): | |
273 | commonsetup(ui) |
|
273 | commonsetup(ui) | |
274 | if _isserver(ui): |
|
274 | if _isserver(ui): | |
275 | serverextsetup(ui) |
|
275 | serverextsetup(ui) | |
276 | else: |
|
276 | else: | |
277 | clientextsetup(ui) |
|
277 | clientextsetup(ui) | |
278 |
|
278 | |||
279 | def commonsetup(ui): |
|
279 | def commonsetup(ui): | |
280 | wireproto.commands['listkeyspatterns'] = ( |
|
280 | wireproto.commands['listkeyspatterns'] = ( | |
281 | wireprotolistkeyspatterns, 'namespace patterns') |
|
281 | wireprotolistkeyspatterns, 'namespace patterns') | |
282 | scratchbranchpat = ui.config('infinitepush', 'branchpattern') |
|
282 | scratchbranchpat = ui.config('infinitepush', 'branchpattern') | |
283 | if scratchbranchpat: |
|
283 | if scratchbranchpat: | |
284 | global _scratchbranchmatcher |
|
284 | global _scratchbranchmatcher | |
285 | kind, pat, _scratchbranchmatcher = \ |
|
285 | kind, pat, _scratchbranchmatcher = \ | |
286 | stringutil.stringmatcher(scratchbranchpat) |
|
286 | stringutil.stringmatcher(scratchbranchpat) | |
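commonsetup turns infinitepush.branchpattern into a matcher via stringutil.stringmatcher, which accepts prefixes such as 're:'. A sketch of the effect, using the same illustrative pattern as in the example configuration above (not a default):

    kind, pat, matcher = stringutil.stringmatcher('re:scratch/.+')
    matcher('scratch/mytask')  # True  -> treated as a scratch (infinitepush) bookmark
    matcher('master')          # False -> handled as a normal bookmark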
287 |
|
287 | |||
288 | def serverextsetup(ui): |
|
288 | def serverextsetup(ui): | |
289 | origpushkeyhandler = bundle2.parthandlermapping['pushkey'] |
|
289 | origpushkeyhandler = bundle2.parthandlermapping['pushkey'] | |
290 |
|
290 | |||
291 | def newpushkeyhandler(*args, **kwargs): |
|
291 | def newpushkeyhandler(*args, **kwargs): | |
292 | bundle2pushkey(origpushkeyhandler, *args, **kwargs) |
|
292 | bundle2pushkey(origpushkeyhandler, *args, **kwargs) | |
293 | newpushkeyhandler.params = origpushkeyhandler.params |
|
293 | newpushkeyhandler.params = origpushkeyhandler.params | |
294 | bundle2.parthandlermapping['pushkey'] = newpushkeyhandler |
|
294 | bundle2.parthandlermapping['pushkey'] = newpushkeyhandler | |
295 |
|
295 | |||
296 | orighandlephasehandler = bundle2.parthandlermapping['phase-heads'] |
|
296 | orighandlephasehandler = bundle2.parthandlermapping['phase-heads'] | |
297 | newphaseheadshandler = lambda *args, **kwargs: \ |
|
297 | newphaseheadshandler = lambda *args, **kwargs: \ | |
298 | bundle2handlephases(orighandlephasehandler, *args, **kwargs) |
|
298 | bundle2handlephases(orighandlephasehandler, *args, **kwargs) | |
299 | newphaseheadshandler.params = orighandlephasehandler.params |
|
299 | newphaseheadshandler.params = orighandlephasehandler.params | |
300 | bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler |
|
300 | bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler | |
301 |
|
301 | |||
302 | extensions.wrapfunction(localrepo.localrepository, 'listkeys', |
|
302 | extensions.wrapfunction(localrepo.localrepository, 'listkeys', | |
303 | localrepolistkeys) |
|
303 | localrepolistkeys) | |
304 | wireproto.commands['lookup'] = ( |
|
304 | wireproto.commands['lookup'] = ( | |
305 | _lookupwrap(wireproto.commands['lookup'][0]), 'key') |
|
305 | _lookupwrap(wireproto.commands['lookup'][0]), 'key') | |
306 | extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks) |
|
306 | extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks) | |
307 |
|
307 | |||
308 | extensions.wrapfunction(bundle2, 'processparts', processparts) |
|
308 | extensions.wrapfunction(bundle2, 'processparts', processparts) | |
309 |
|
309 | |||
310 | def clientextsetup(ui): |
|
310 | def clientextsetup(ui): | |
311 | entry = extensions.wrapcommand(commands.table, 'push', _push) |
|
311 | entry = extensions.wrapcommand(commands.table, 'push', _push) | |
312 |
|
312 | |||
313 | entry[1].append( |
|
313 | entry[1].append( | |
314 | ('', 'bundle-store', None, |
|
314 | ('', 'bundle-store', None, | |
315 | _('force push to go to bundle store (EXPERIMENTAL)'))) |
|
315 | _('force push to go to bundle store (EXPERIMENTAL)'))) | |
316 |
|
316 | |||
317 | extensions.wrapcommand(commands.table, 'pull', _pull) |
|
317 | extensions.wrapcommand(commands.table, 'pull', _pull) | |
318 |
|
318 | |||
319 | extensions.wrapfunction(discovery, 'checkheads', _checkheads) |
|
319 | extensions.wrapfunction(discovery, 'checkheads', _checkheads) | |
320 |
|
320 | |||
321 | wireproto.wirepeer.listkeyspatterns = listkeyspatterns |
|
321 | wireproto.wirepeer.listkeyspatterns = listkeyspatterns | |
322 |
|
322 | |||
323 | partorder = exchange.b2partsgenorder |
|
323 | partorder = exchange.b2partsgenorder | |
324 | index = partorder.index('changeset') |
|
324 | index = partorder.index('changeset') | |
325 | partorder.insert( |
|
325 | partorder.insert( | |
326 | index, partorder.pop(partorder.index(scratchbranchparttype))) |
|
326 | index, partorder.pop(partorder.index(scratchbranchparttype))) | |
327 |
|
327 | |||
328 | def _checkheads(orig, pushop): |
|
328 | def _checkheads(orig, pushop): | |
329 | if pushop.ui.configbool(experimental, configscratchpush, False): |
|
329 | if pushop.ui.configbool(experimental, configscratchpush, False): | |
330 | return |
|
330 | return | |
331 | return orig(pushop) |
|
331 | return orig(pushop) | |
332 |
|
332 | |||
333 | def wireprotolistkeyspatterns(repo, proto, namespace, patterns): |
|
333 | def wireprotolistkeyspatterns(repo, proto, namespace, patterns): | |
334 | patterns = wireproto.decodelist(patterns) |
|
334 | patterns = wireproto.decodelist(patterns) | |
335 | d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems() |
|
335 | d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems() | |
336 | return pushkey.encodekeys(d) |
|
336 | return pushkey.encodekeys(d) | |
337 |
|
337 | |||
338 | def localrepolistkeys(orig, self, namespace, patterns=None): |
|
338 | def localrepolistkeys(orig, self, namespace, patterns=None): | |
339 | if namespace == 'bookmarks' and patterns: |
|
339 | if namespace == 'bookmarks' and patterns: | |
340 | index = self.bundlestore.index |
|
340 | index = self.bundlestore.index | |
341 | results = {} |
|
341 | results = {} | |
342 | bookmarks = orig(self, namespace) |
|
342 | bookmarks = orig(self, namespace) | |
343 | for pattern in patterns: |
|
343 | for pattern in patterns: | |
344 | results.update(index.getbookmarks(pattern)) |
|
344 | results.update(index.getbookmarks(pattern)) | |
345 | if pattern.endswith('*'): |
|
345 | if pattern.endswith('*'): | |
346 | pattern = 're:^' + pattern[:-1] + '.*' |
|
346 | pattern = 're:^' + pattern[:-1] + '.*' | |
347 | kind, pat, matcher = stringutil.stringmatcher(pattern) |
|
347 | kind, pat, matcher = stringutil.stringmatcher(pattern) | |
348 | for bookmark, node in bookmarks.iteritems(): |
|
348 | for bookmark, node in bookmarks.iteritems(): | |
349 | if matcher(bookmark): |
|
349 | if matcher(bookmark): | |
350 | results[bookmark] = node |
|
350 | results[bookmark] = node | |
351 | return results |
|
351 | return results | |
352 | else: |
|
352 | else: | |
353 | return orig(self, namespace) |
|
353 | return orig(self, namespace) | |
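localrepolistkeys above consults both the bundlestore index and the repository's regular bookmarks, rewriting a pattern with a trailing '*' into a regular expression before matching the regular bookmarks. A small sketch of that rewrite with an example pattern:

    pattern = 'scratch/*'
    if pattern.endswith('*'):
        pattern = 're:^' + pattern[:-1] + '.*'   # -> 're:^scratch/.*'
    kind, pat, matcher = stringutil.stringmatcher(pattern)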
354 |
|
354 | |||
355 | @peer.batchable |
|
355 | @peer.batchable | |
356 | def listkeyspatterns(self, namespace, patterns): |
|
356 | def listkeyspatterns(self, namespace, patterns): | |
357 | if not self.capable('pushkey'): |
|
357 | if not self.capable('pushkey'): | |
358 | yield {}, None |
|
358 | yield {}, None | |
359 | f = peer.future() |
|
359 | f = peer.future() | |
360 | self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' % |
|
360 | self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' % | |
361 | (namespace, patterns)) |
|
361 | (namespace, patterns)) | |
362 | yield { |
|
362 | yield { | |
363 | 'namespace': encoding.fromlocal(namespace), |
|
363 | 'namespace': encoding.fromlocal(namespace), | |
364 | 'patterns': wireproto.encodelist(patterns) |
|
364 | 'patterns': wireproto.encodelist(patterns) | |
365 | }, f |
|
365 | }, f | |
366 | d = f.value |
|
366 | d = f.value | |
367 | self.ui.debug('received listkey for "%s": %i bytes\n' |
|
367 | self.ui.debug('received listkey for "%s": %i bytes\n' | |
368 | % (namespace, len(d))) |
|
368 | % (namespace, len(d))) | |
369 | yield pushkey.decodekeys(d) |
|
369 | yield pushkey.decodekeys(d) | |
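This batchable wrapper is what lets a client peer query the server's 'listkeyspatterns' wire command; _pull below uses it to resolve scratch bookmarks. A hedged usage sketch (the remote path and bookmark name are placeholders):

    other = hg.peer(repo, {}, 'ssh://hg-server/repo')  # hypothetical remote
    books = other.listkeyspatterns('bookmarks', patterns=['scratch/mytask'])
    # books maps bookmark names to hex nodes, e.g. {'scratch/mytask': '<40-hex-digit node>'}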
370 |
|
370 | |||
371 | def _readbundlerevs(bundlerepo): |
|
371 | def _readbundlerevs(bundlerepo): | |
372 | return list(bundlerepo.revs('bundle()')) |
|
372 | return list(bundlerepo.revs('bundle()')) | |
373 |
|
373 | |||
374 | def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui): |
|
374 | def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui): | |
375 | '''Tells remotefilelog to include all changed files in the changegroup |

375 | '''Tells remotefilelog to include all changed files in the changegroup | |
376 |

376 | |||
377 | By default remotefilelog doesn't include file content in the changegroup. |

377 | By default remotefilelog doesn't include file content in the changegroup. | |
378 | But we need to include it if we are fetching from the bundlestore. |

378 | But we need to include it if we are fetching from the bundlestore. | |
379 | ''' |
|
379 | ''' | |
380 | changedfiles = set() |
|
380 | changedfiles = set() | |
381 | cl = bundlerepo.changelog |
|
381 | cl = bundlerepo.changelog | |
382 | for r in bundlerevs: |
|
382 | for r in bundlerevs: | |
383 | # [3] means changed files |
|
383 | # [3] means changed files | |
384 | changedfiles.update(cl.read(r)[3]) |
|
384 | changedfiles.update(cl.read(r)[3]) | |
385 | if not changedfiles: |
|
385 | if not changedfiles: | |
386 | return bundlecaps |
|
386 | return bundlecaps | |
387 |
|
387 | |||
388 | changedfiles = '\0'.join(changedfiles) |
|
388 | changedfiles = '\0'.join(changedfiles) | |
389 | newcaps = [] |
|
389 | newcaps = [] | |
390 | appended = False |
|
390 | appended = False | |
391 | for cap in (bundlecaps or []): |
|
391 | for cap in (bundlecaps or []): | |
392 | if cap.startswith('excludepattern='): |
|
392 | if cap.startswith('excludepattern='): | |
393 | newcaps.append('\0'.join((cap, changedfiles))) |
|
393 | newcaps.append('\0'.join((cap, changedfiles))) | |
394 | appended = True |
|
394 | appended = True | |
395 | else: |
|
395 | else: | |
396 | newcaps.append(cap) |
|
396 | newcaps.append(cap) | |
397 | if not appended: |
|
397 | if not appended: | |
398 | # excludepattern cap not found. Just append it |

398 | # excludepattern cap not found. Just append it | |
399 | newcaps.append('excludepattern=' + changedfiles) |
|
399 | newcaps.append('excludepattern=' + changedfiles) | |
400 |
|
400 | |||
401 | return newcaps |
|
401 | return newcaps | |
402 |
|
402 | |||
403 | def _rebundle(bundlerepo, bundleroots, unknownhead): |
|
403 | def _rebundle(bundlerepo, bundleroots, unknownhead): | |
404 | ''' |
|
404 | ''' | |
405 | A bundle may include more revisions than the user requested. For example, |

405 | A bundle may include more revisions than the user requested. For example, | |
406 | if the user asks for a revision but the bundle also contains its descendants. |

406 | if the user asks for a revision but the bundle also contains its descendants. | |
407 | This function filters out all revisions that the user did not request. |

407 | This function filters out all revisions that the user did not request. | |
408 | ''' |
|
408 | ''' | |
409 | parts = [] |
|
409 | parts = [] | |
410 |
|
410 | |||
411 | version = '02' |
|
411 | version = '02' | |
412 | outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots, |
|
412 | outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots, | |
413 | missingheads=[unknownhead]) |
|
413 | missingheads=[unknownhead]) | |
414 | cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull') |
|
414 | cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull') | |
415 | cgstream = util.chunkbuffer(cgstream).read() |
|
415 | cgstream = util.chunkbuffer(cgstream).read() | |
416 | cgpart = bundle2.bundlepart('changegroup', data=cgstream) |
|
416 | cgpart = bundle2.bundlepart('changegroup', data=cgstream) | |
417 | cgpart.addparam('version', version) |
|
417 | cgpart.addparam('version', version) | |
418 | parts.append(cgpart) |
|
418 | parts.append(cgpart) | |
419 |
|
419 | |||
420 | return parts |
|
420 | return parts | |
421 |
|
421 | |||
422 | def _getbundleroots(oldrepo, bundlerepo, bundlerevs): |
|
422 | def _getbundleroots(oldrepo, bundlerepo, bundlerevs): | |
423 | cl = bundlerepo.changelog |
|
423 | cl = bundlerepo.changelog | |
424 | bundleroots = [] |
|
424 | bundleroots = [] | |
425 | for rev in bundlerevs: |
|
425 | for rev in bundlerevs: | |
426 | node = cl.node(rev) |
|
426 | node = cl.node(rev) | |
427 | parents = cl.parents(node) |
|
427 | parents = cl.parents(node) | |
428 | for parent in parents: |
|
428 | for parent in parents: | |
429 | # include all revs that exist in the main repo |
|
429 | # include all revs that exist in the main repo | |
430 | # to make sure that the bundle can be applied client-side |

430 | # to make sure that the bundle can be applied client-side | |
431 | if parent in oldrepo: |
|
431 | if parent in oldrepo: | |
432 | bundleroots.append(parent) |
|
432 | bundleroots.append(parent) | |
433 | return bundleroots |
|
433 | return bundleroots | |
434 |
|
434 | |||
435 | def _needsrebundling(head, bundlerepo): |
|
435 | def _needsrebundling(head, bundlerepo): | |
436 | bundleheads = list(bundlerepo.revs('heads(bundle())')) |
|
436 | bundleheads = list(bundlerepo.revs('heads(bundle())')) | |
437 | return not (len(bundleheads) == 1 and |
|
437 | return not (len(bundleheads) == 1 and | |
438 | bundlerepo[bundleheads[0]].node() == head) |
|
438 | bundlerepo[bundleheads[0]].node() == head) | |
439 |
|
439 | |||
440 | def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile): |
|
440 | def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile): | |
441 | '''generates the bundle that will be sent to the user |

441 | '''generates the bundle that will be sent to the user | |
442 |

442 | |||
443 | returns a tuple with the raw bundle string and the bundle type |

443 | returns a tuple with the raw bundle string and the bundle type | |
444 | ''' |
|
444 | ''' | |
445 | parts = [] |
|
445 | parts = [] | |
446 | if not _needsrebundling(head, bundlerepo): |
|
446 | if not _needsrebundling(head, bundlerepo): | |
447 | with util.posixfile(bundlefile, "rb") as f: |
|
447 | with util.posixfile(bundlefile, "rb") as f: | |
448 | unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile) |
|
448 | unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile) | |
449 | if isinstance(unbundler, changegroup.cg1unpacker): |
|
449 | if isinstance(unbundler, changegroup.cg1unpacker): | |
450 | part = bundle2.bundlepart('changegroup', |
|
450 | part = bundle2.bundlepart('changegroup', | |
451 | data=unbundler._stream.read()) |
|
451 | data=unbundler._stream.read()) | |
452 | part.addparam('version', '01') |
|
452 | part.addparam('version', '01') | |
453 | parts.append(part) |
|
453 | parts.append(part) | |
454 | elif isinstance(unbundler, bundle2.unbundle20): |
|
454 | elif isinstance(unbundler, bundle2.unbundle20): | |
455 | haschangegroup = False |
|
455 | haschangegroup = False | |
456 | for part in unbundler.iterparts(): |
|
456 | for part in unbundler.iterparts(): | |
457 | if part.type == 'changegroup': |
|
457 | if part.type == 'changegroup': | |
458 | haschangegroup = True |
|
458 | haschangegroup = True | |
459 | newpart = bundle2.bundlepart(part.type, data=part.read()) |
|
459 | newpart = bundle2.bundlepart(part.type, data=part.read()) | |
460 | for key, value in part.params.iteritems(): |
|
460 | for key, value in part.params.iteritems(): | |
461 | newpart.addparam(key, value) |
|
461 | newpart.addparam(key, value) | |
462 | parts.append(newpart) |
|
462 | parts.append(newpart) | |
463 |
|
463 | |||
464 | if not haschangegroup: |
|
464 | if not haschangegroup: | |
465 | raise error.Abort( |
|
465 | raise error.Abort( | |
466 | 'unexpected bundle without changegroup part, ' + |
|
466 | 'unexpected bundle without changegroup part, ' + | |
467 | 'head: %s' % hex(head), |
|
467 | 'head: %s' % hex(head), | |
468 | hint='report to administrator') |
|
468 | hint='report to administrator') | |
469 | else: |
|
469 | else: | |
470 | raise error.Abort('unknown bundle type') |
|
470 | raise error.Abort('unknown bundle type') | |
471 | else: |
|
471 | else: | |
472 | parts = _rebundle(bundlerepo, bundleroots, head) |
|
472 | parts = _rebundle(bundlerepo, bundleroots, head) | |
473 |
|
473 | |||
474 | return parts |
|
474 | return parts | |
475 |
|
475 | |||
476 | def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs): |
|
476 | def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs): | |
477 | heads = heads or [] |
|
477 | heads = heads or [] | |
478 | # newheads are parents of roots of scratch bundles that were requested |
|
478 | # newheads are parents of roots of scratch bundles that were requested | |
479 | newphases = {} |
|
479 | newphases = {} | |
480 | scratchbundles = [] |
|
480 | scratchbundles = [] | |
481 | newheads = [] |
|
481 | newheads = [] | |
482 | scratchheads = [] |
|
482 | scratchheads = [] | |
483 | nodestobundle = {} |
|
483 | nodestobundle = {} | |
484 | allbundlestocleanup = [] |
|
484 | allbundlestocleanup = [] | |
485 | try: |
|
485 | try: | |
486 | for head in heads: |
|
486 | for head in heads: | |
487 | if head not in repo.changelog.nodemap: |
|
487 | if head not in repo.changelog.nodemap: | |
488 | if head not in nodestobundle: |
|
488 | if head not in nodestobundle: | |
489 | newbundlefile = common.downloadbundle(repo, head) |
|
489 | newbundlefile = common.downloadbundle(repo, head) | |
490 | bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile) |
|
490 | bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile) | |
491 | bundlerepo = hg.repository(repo.ui, bundlepath) |
|
491 | bundlerepo = hg.repository(repo.ui, bundlepath) | |
492 |
|
492 | |||
493 | allbundlestocleanup.append((bundlerepo, newbundlefile)) |
|
493 | allbundlestocleanup.append((bundlerepo, newbundlefile)) | |
494 | bundlerevs = set(_readbundlerevs(bundlerepo)) |
|
494 | bundlerevs = set(_readbundlerevs(bundlerepo)) | |
495 | bundlecaps = _includefilelogstobundle( |
|
495 | bundlecaps = _includefilelogstobundle( | |
496 | bundlecaps, bundlerepo, bundlerevs, repo.ui) |
|
496 | bundlecaps, bundlerepo, bundlerevs, repo.ui) | |
497 | cl = bundlerepo.changelog |
|
497 | cl = bundlerepo.changelog | |
498 | bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs) |
|
498 | bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs) | |
499 | for rev in bundlerevs: |
|
499 | for rev in bundlerevs: | |
500 | node = cl.node(rev) |
|
500 | node = cl.node(rev) | |
501 | newphases[hex(node)] = str(phases.draft) |
|
501 | newphases[hex(node)] = str(phases.draft) | |
502 | nodestobundle[node] = (bundlerepo, bundleroots, |
|
502 | nodestobundle[node] = (bundlerepo, bundleroots, | |
503 | newbundlefile) |
|
503 | newbundlefile) | |
504 |
|
504 | |||
505 | scratchbundles.append( |
|
505 | scratchbundles.append( | |
506 | _generateoutputparts(head, *nodestobundle[head])) |
|
506 | _generateoutputparts(head, *nodestobundle[head])) | |
507 | newheads.extend(bundleroots) |
|
507 | newheads.extend(bundleroots) | |
508 | scratchheads.append(head) |
|
508 | scratchheads.append(head) | |
509 | finally: |
|
509 | finally: | |
510 | for bundlerepo, bundlefile in allbundlestocleanup: |
|
510 | for bundlerepo, bundlefile in allbundlestocleanup: | |
511 | bundlerepo.close() |
|
511 | bundlerepo.close() | |
512 | try: |
|
512 | try: | |
513 | os.unlink(bundlefile) |
|
513 | os.unlink(bundlefile) | |
514 | except (IOError, OSError): |
|
514 | except (IOError, OSError): | |
515 | # if we can't cleanup the file then just ignore the error, |
|
515 | # if we can't cleanup the file then just ignore the error, | |
516 | # no need to fail |
|
516 | # no need to fail | |
517 | pass |
|
517 | pass | |
518 |
|
518 | |||
519 | pullfrombundlestore = bool(scratchbundles) |
|
519 | pullfrombundlestore = bool(scratchbundles) | |
520 | wrappedchangegrouppart = False |
|
520 | wrappedchangegrouppart = False | |
521 | wrappedlistkeys = False |
|
521 | wrappedlistkeys = False | |
522 | oldchangegrouppart = exchange.getbundle2partsmapping['changegroup'] |
|
522 | oldchangegrouppart = exchange.getbundle2partsmapping['changegroup'] | |
523 | try: |
|
523 | try: | |
524 | def _changegrouppart(bundler, *args, **kwargs): |
|
524 | def _changegrouppart(bundler, *args, **kwargs): | |
525 | # Order is important here. First add non-scratch part |
|
525 | # Order is important here. First add non-scratch part | |
526 | # and only then add parts with scratch bundles because |
|
526 | # and only then add parts with scratch bundles because | |
527 | # non-scratch part contains parents of roots of scratch bundles. |
|
527 | # non-scratch part contains parents of roots of scratch bundles. | |
528 | result = oldchangegrouppart(bundler, *args, **kwargs) |
|
528 | result = oldchangegrouppart(bundler, *args, **kwargs) | |
529 | for bundle in scratchbundles: |
|
529 | for bundle in scratchbundles: | |
530 | for part in bundle: |
|
530 | for part in bundle: | |
531 | bundler.addpart(part) |
|
531 | bundler.addpart(part) | |
532 | return result |
|
532 | return result | |
533 |
|
533 | |||
534 | exchange.getbundle2partsmapping['changegroup'] = _changegrouppart |
|
534 | exchange.getbundle2partsmapping['changegroup'] = _changegrouppart | |
535 | wrappedchangegrouppart = True |
|
535 | wrappedchangegrouppart = True | |
536 |
|
536 | |||
537 | def _listkeys(orig, self, namespace): |
|
537 | def _listkeys(orig, self, namespace): | |
538 | origvalues = orig(self, namespace) |
|
538 | origvalues = orig(self, namespace) | |
539 | if namespace == 'phases' and pullfrombundlestore: |
|
539 | if namespace == 'phases' and pullfrombundlestore: | |
540 | if origvalues.get('publishing') == 'True': |
|
540 | if origvalues.get('publishing') == 'True': | |
541 | # Make repo non-publishing to preserve draft phase |
|
541 | # Make repo non-publishing to preserve draft phase | |
542 | del origvalues['publishing'] |
|
542 | del origvalues['publishing'] | |
543 | origvalues.update(newphases) |
|
543 | origvalues.update(newphases) | |
544 | return origvalues |
|
544 | return origvalues | |
545 |
|
545 | |||
546 | extensions.wrapfunction(localrepo.localrepository, 'listkeys', |
|
546 | extensions.wrapfunction(localrepo.localrepository, 'listkeys', | |
547 | _listkeys) |
|
547 | _listkeys) | |
548 | wrappedlistkeys = True |
|
548 | wrappedlistkeys = True | |
549 | heads = list((set(newheads) | set(heads)) - set(scratchheads)) |
|
549 | heads = list((set(newheads) | set(heads)) - set(scratchheads)) | |
550 | result = orig(repo, source, heads=heads, |
|
550 | result = orig(repo, source, heads=heads, | |
551 | bundlecaps=bundlecaps, **kwargs) |
|
551 | bundlecaps=bundlecaps, **kwargs) | |
552 | finally: |
|
552 | finally: | |
553 | if wrappedchangegrouppart: |
|
553 | if wrappedchangegrouppart: | |
554 | exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart |
|
554 | exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart | |
555 | if wrappedlistkeys: |
|
555 | if wrappedlistkeys: | |
556 | extensions.unwrapfunction(localrepo.localrepository, 'listkeys', |
|
556 | extensions.unwrapfunction(localrepo.localrepository, 'listkeys', | |
557 | _listkeys) |
|
557 | _listkeys) | |
558 | return result |
|
558 | return result | |
559 |
|
559 | |||
560 | def _lookupwrap(orig): |
|
560 | def _lookupwrap(orig): | |
561 | def _lookup(repo, proto, key): |
|
561 | def _lookup(repo, proto, key): | |
562 | localkey = encoding.tolocal(key) |
|
562 | localkey = encoding.tolocal(key) | |
563 |
|
563 | |||
564 | if isinstance(localkey, str) and _scratchbranchmatcher(localkey): |
|
564 | if isinstance(localkey, str) and _scratchbranchmatcher(localkey): | |
565 | scratchnode = repo.bundlestore.index.getnode(localkey) |
|
565 | scratchnode = repo.bundlestore.index.getnode(localkey) | |
566 | if scratchnode: |
|
566 | if scratchnode: | |
567 | return "%s %s\n" % (1, scratchnode) |
|
567 | return "%s %s\n" % (1, scratchnode) | |
568 | else: |
|
568 | else: | |
569 | return "%s %s\n" % (0, 'scratch branch %s not found' % localkey) |
|
569 | return "%s %s\n" % (0, 'scratch branch %s not found' % localkey) | |
570 | else: |
|
570 | else: | |
571 | try: |
|
571 | try: | |
572 | r = hex(repo.lookup(localkey)) |
|
572 | r = hex(repo.lookup(localkey)) | |
573 | return "%s %s\n" % (1, r) |
|
573 | return "%s %s\n" % (1, r) | |
574 | except Exception as inst: |
|
574 | except Exception as inst: | |
575 | if repo.bundlestore.index.getbundle(localkey): |
|
575 | if repo.bundlestore.index.getbundle(localkey): | |
576 | return "%s %s\n" % (1, localkey) |
|
576 | return "%s %s\n" % (1, localkey) | |
577 | else: |
|
577 | else: | |
578 | r = str(inst) |
|
578 | r = str(inst) | |
579 | return "%s %s\n" % (0, r) |
|
579 | return "%s %s\n" % (0, r) | |
580 | return _lookup |
|
580 | return _lookup | |
581 |
|
581 | |||
582 | def _pull(orig, ui, repo, source="default", **opts): |
|
582 | def _pull(orig, ui, repo, source="default", **opts): | |
583 | opts = pycompat.byteskwargs(opts) |
|
583 | opts = pycompat.byteskwargs(opts) | |
584 | # Copy paste from `pull` command |
|
584 | # Copy paste from `pull` command | |
585 | source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) |
|
585 | source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) | |
586 |
|
586 | |||
587 | scratchbookmarks = {} |
|
587 | scratchbookmarks = {} | |
588 | unfi = repo.unfiltered() |
|
588 | unfi = repo.unfiltered() | |
589 | unknownnodes = [] |
|
589 | unknownnodes = [] | |
590 | for rev in opts.get('rev', []): |
|
590 | for rev in opts.get('rev', []): | |
591 | if rev not in unfi: |
|
591 | if rev not in unfi: | |
592 | unknownnodes.append(rev) |
|
592 | unknownnodes.append(rev) | |
593 | if opts.get('bookmark'): |
|
593 | if opts.get('bookmark'): | |
594 | bookmarks = [] |
|
594 | bookmarks = [] | |
595 | revs = opts.get('rev') or [] |
|
595 | revs = opts.get('rev') or [] | |
596 | for bookmark in opts.get('bookmark'): |
|
596 | for bookmark in opts.get('bookmark'): | |
597 | if _scratchbranchmatcher(bookmark): |
|
597 | if _scratchbranchmatcher(bookmark): | |
598 | # rev is not known yet |
|
598 | # rev is not known yet | |
599 | # it will be fetched with listkeyspatterns next |
|
599 | # it will be fetched with listkeyspatterns next | |
600 | scratchbookmarks[bookmark] = 'REVTOFETCH' |
|
600 | scratchbookmarks[bookmark] = 'REVTOFETCH' | |
601 | else: |
|
601 | else: | |
602 | bookmarks.append(bookmark) |
|
602 | bookmarks.append(bookmark) | |
603 |
|
603 | |||
604 | if scratchbookmarks: |
|
604 | if scratchbookmarks: | |
605 | other = hg.peer(repo, opts, source) |
|
605 | other = hg.peer(repo, opts, source) | |
606 | fetchedbookmarks = other.listkeyspatterns( |
|
606 | fetchedbookmarks = other.listkeyspatterns( | |
607 | 'bookmarks', patterns=scratchbookmarks) |
|
607 | 'bookmarks', patterns=scratchbookmarks) | |
608 | for bookmark in scratchbookmarks: |
|
608 | for bookmark in scratchbookmarks: | |
609 | if bookmark not in fetchedbookmarks: |
|
609 | if bookmark not in fetchedbookmarks: | |
610 | raise error.Abort('remote bookmark %s not found!' % |
|
610 | raise error.Abort('remote bookmark %s not found!' % | |
611 | bookmark) |
|
611 | bookmark) | |
612 | scratchbookmarks[bookmark] = fetchedbookmarks[bookmark] |
|
612 | scratchbookmarks[bookmark] = fetchedbookmarks[bookmark] | |
613 | revs.append(fetchedbookmarks[bookmark]) |
|
613 | revs.append(fetchedbookmarks[bookmark]) | |
614 | opts['bookmark'] = bookmarks |
|
614 | opts['bookmark'] = bookmarks | |
615 | opts['rev'] = revs |
|
615 | opts['rev'] = revs | |
616 |
|
616 | |||
617 | if scratchbookmarks or unknownnodes: |
|
617 | if scratchbookmarks or unknownnodes: | |
618 | # Set anyincoming to True |
|
618 | # Set anyincoming to True | |
619 | extensions.wrapfunction(discovery, 'findcommonincoming', |
|
619 | extensions.wrapfunction(discovery, 'findcommonincoming', | |
620 | _findcommonincoming) |
|
620 | _findcommonincoming) | |
621 | try: |
|
621 | try: | |
622 | # Remote scratch bookmarks will be deleted because remotenames doesn't |
|
622 | # Remote scratch bookmarks will be deleted because remotenames doesn't | |
623 | # know about them. Let's save them before the pull and restore them after |

623 | # know about them. Let's save them before the pull and restore them after | |
624 | remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source) |
|
624 | remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source) | |
625 | result = orig(ui, repo, source, **pycompat.strkwargs(opts)) |
|
625 | result = orig(ui, repo, source, **pycompat.strkwargs(opts)) | |
626 | # TODO(stash): race condition is possible |
|
626 | # TODO(stash): race condition is possible | |
627 | # if scratch bookmarks were updated right after orig. |

627 | # if scratch bookmarks were updated right after orig. | |
628 | # But that's unlikely and shouldn't be harmful. |
|
628 | # But that's unlikely and shouldn't be harmful. | |
629 | if common.isremotebooksenabled(ui): |
|
629 | if common.isremotebooksenabled(ui): | |
630 | remotescratchbookmarks.update(scratchbookmarks) |
|
630 | remotescratchbookmarks.update(scratchbookmarks) | |
631 | _saveremotebookmarks(repo, remotescratchbookmarks, source) |
|
631 | _saveremotebookmarks(repo, remotescratchbookmarks, source) | |
632 | else: |
|
632 | else: | |
633 | _savelocalbookmarks(repo, scratchbookmarks) |
|
633 | _savelocalbookmarks(repo, scratchbookmarks) | |
634 | return result |
|
634 | return result | |
635 | finally: |
|
635 | finally: | |
636 | if scratchbookmarks: |
|
636 | if scratchbookmarks: | |
637 | extensions.unwrapfunction(discovery, 'findcommonincoming') |
|
637 | extensions.unwrapfunction(discovery, 'findcommonincoming') | |
638 |
|
638 | |||
639 | def _readscratchremotebookmarks(ui, repo, other): |
|
639 | def _readscratchremotebookmarks(ui, repo, other): | |
640 | if common.isremotebooksenabled(ui): |
|
640 | if common.isremotebooksenabled(ui): | |
641 | remotenamesext = extensions.find('remotenames') |
|
641 | remotenamesext = extensions.find('remotenames') | |
642 | remotepath = remotenamesext.activepath(repo.ui, other) |
|
642 | remotepath = remotenamesext.activepath(repo.ui, other) | |
643 | result = {} |
|
643 | result = {} | |
644 | # Let's refresh remotenames to make sure we have it up to date |
|
644 | # Let's refresh remotenames to make sure we have it up to date | |
645 | # Seems that `repo.names['remotebookmarks']` may return stale bookmarks |
|
645 | # Seems that `repo.names['remotebookmarks']` may return stale bookmarks | |
646 | # and it results in deleting scratch bookmarks. Our best guess how to |
|
646 | # and it results in deleting scratch bookmarks. Our best guess how to | |
647 | # fix it is to use `clearnames()` |
|
647 | # fix it is to use `clearnames()` | |
648 | repo._remotenames.clearnames() |
|
648 | repo._remotenames.clearnames() | |
649 | for remotebookmark in repo.names['remotebookmarks'].listnames(repo): |
|
649 | for remotebookmark in repo.names['remotebookmarks'].listnames(repo): | |
650 | path, bookname = remotenamesext.splitremotename(remotebookmark) |
|
650 | path, bookname = remotenamesext.splitremotename(remotebookmark) | |
651 | if path == remotepath and _scratchbranchmatcher(bookname): |
|
651 | if path == remotepath and _scratchbranchmatcher(bookname): | |
652 | nodes = repo.names['remotebookmarks'].nodes(repo, |
|
652 | nodes = repo.names['remotebookmarks'].nodes(repo, | |
653 | remotebookmark) |
|
653 | remotebookmark) | |
654 | if nodes: |
|
654 | if nodes: | |
655 | result[bookname] = hex(nodes[0]) |
|
655 | result[bookname] = hex(nodes[0]) | |
656 | return result |
|
656 | return result | |
657 | else: |
|
657 | else: | |
658 | return {} |
|
658 | return {} | |
659 |
|
659 | |||
660 | def _saveremotebookmarks(repo, newbookmarks, remote): |
|
660 | def _saveremotebookmarks(repo, newbookmarks, remote): | |
661 | remotenamesext = extensions.find('remotenames') |
|
661 | remotenamesext = extensions.find('remotenames') | |
662 | remotepath = remotenamesext.activepath(repo.ui, remote) |
|
662 | remotepath = remotenamesext.activepath(repo.ui, remote) | |
663 | branches = collections.defaultdict(list) |
|
663 | branches = collections.defaultdict(list) | |
664 | bookmarks = {} |
|
664 | bookmarks = {} | |
665 | remotenames = remotenamesext.readremotenames(repo) |
|
665 | remotenames = remotenamesext.readremotenames(repo) | |
666 | for hexnode, nametype, remote, rname in remotenames: |
|
666 | for hexnode, nametype, remote, rname in remotenames: | |
667 | if remote != remotepath: |
|
667 | if remote != remotepath: | |
668 | continue |
|
668 | continue | |
669 | if nametype == 'bookmarks': |
|
669 | if nametype == 'bookmarks': | |
670 | if rname in newbookmarks: |
|
670 | if rname in newbookmarks: | |
671 | # It's possible that we have a normal bookmark that matches |

671 | # It's possible that we have a normal bookmark that matches | |
672 | # the scratch branch pattern. In this case just use the current |

672 | # the scratch branch pattern. In this case just use the current | |
673 | # bookmark node |
|
673 | # bookmark node | |
674 | del newbookmarks[rname] |
|
674 | del newbookmarks[rname] | |
675 | bookmarks[rname] = hexnode |
|
675 | bookmarks[rname] = hexnode | |
676 | elif nametype == 'branches': |
|
676 | elif nametype == 'branches': | |
677 | # saveremotenames expects 20 byte binary nodes for branches |
|
677 | # saveremotenames expects 20 byte binary nodes for branches | |
678 | branches[rname].append(bin(hexnode)) |
|
678 | branches[rname].append(bin(hexnode)) | |
679 |
|
679 | |||
680 | for bookmark, hexnode in newbookmarks.iteritems(): |
|
680 | for bookmark, hexnode in newbookmarks.iteritems(): | |
681 | bookmarks[bookmark] = hexnode |
|
681 | bookmarks[bookmark] = hexnode | |
682 | remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks) |
|
682 | remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks) | |
683 |
|
683 | |||
684 | def _savelocalbookmarks(repo, bookmarks): |
|
684 | def _savelocalbookmarks(repo, bookmarks): | |
685 | if not bookmarks: |
|
685 | if not bookmarks: | |
686 | return |
|
686 | return | |
687 | with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr: |
|
687 | with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr: | |
688 | changes = [] |
|
688 | changes = [] | |
689 | for scratchbook, node in bookmarks.iteritems(): |
|
689 | for scratchbook, node in bookmarks.iteritems(): | |
690 | changectx = repo[node] |
|
690 | changectx = repo[node] | |
691 | changes.append((scratchbook, changectx.node())) |
|
691 | changes.append((scratchbook, changectx.node())) | |
692 | repo._bookmarks.applychanges(repo, tr, changes) |
|
692 | repo._bookmarks.applychanges(repo, tr, changes) | |
693 |
|
693 | |||
694 | def _findcommonincoming(orig, *args, **kwargs): |
|
694 | def _findcommonincoming(orig, *args, **kwargs): | |
695 | common, inc, remoteheads = orig(*args, **kwargs) |
|
695 | common, inc, remoteheads = orig(*args, **kwargs) | |
696 | return common, True, remoteheads |
|
696 | return common, True, remoteheads | |
697 |
|
697 | |||
698 | def _push(orig, ui, repo, dest=None, *args, **opts): |
|
698 | def _push(orig, ui, repo, dest=None, *args, **opts): | |
699 |
|
699 | |||
700 | bookmark = opts.get('bookmark') |
|
700 | bookmark = opts.get(r'bookmark') | |
701 | # we only support pushing one infinitepush bookmark at a time |

701 | # we only support pushing one infinitepush bookmark at a time | |
702 | if len(bookmark) == 1: |
|
702 | if len(bookmark) == 1: | |
703 | bookmark = bookmark[0] |
|
703 | bookmark = bookmark[0] | |
704 | else: |
|
704 | else: | |
705 | bookmark = '' |
|
705 | bookmark = '' | |
706 |
|
706 | |||
707 | oldphasemove = None |
|
707 | oldphasemove = None | |
708 | overrides = {(experimental, configbookmark): bookmark} |
|
708 | overrides = {(experimental, configbookmark): bookmark} | |
709 |
|
709 | |||
710 | with ui.configoverride(overrides, 'infinitepush'): |
|
710 | with ui.configoverride(overrides, 'infinitepush'): | |
711 | scratchpush = opts.get('bundle_store') |
|
711 | scratchpush = opts.get('bundle_store') | |
712 | if _scratchbranchmatcher(bookmark): |
|
712 | if _scratchbranchmatcher(bookmark): | |
713 | scratchpush = True |
|
713 | scratchpush = True | |
714 | # bundle2 can be sent back after push (for example, bundle2 |
|
714 | # bundle2 can be sent back after push (for example, bundle2 | |
715 | # containing `pushkey` part to update bookmarks) |
|
715 | # containing `pushkey` part to update bookmarks) | |
716 | ui.setconfig(experimental, 'bundle2.pushback', True) |
|
716 | ui.setconfig(experimental, 'bundle2.pushback', True) | |
717 |
|
717 | |||
718 | if scratchpush: |
|
718 | if scratchpush: | |
719 | # this is an infinitepush, we don't want the bookmark to be applied |
|
719 | # this is an infinitepush, we don't want the bookmark to be applied | |
720 | # rather it should be stored in the bundlestore |

720 | # rather it should be stored in the bundlestore | |
721 | opts['bookmark'] = [] |
|
721 | opts[r'bookmark'] = [] | |
722 | ui.setconfig(experimental, configscratchpush, True) |
|
722 | ui.setconfig(experimental, configscratchpush, True) | |
723 | oldphasemove = extensions.wrapfunction(exchange, |
|
723 | oldphasemove = extensions.wrapfunction(exchange, | |
724 | '_localphasemove', |
|
724 | '_localphasemove', | |
725 | _phasemove) |
|
725 | _phasemove) | |
726 | # Copy-paste from `push` command |
|
726 | # Copy-paste from `push` command | |
727 | path = ui.paths.getpath(dest, default=('default-push', 'default')) |
|
727 | path = ui.paths.getpath(dest, default=('default-push', 'default')) | |
728 | if not path: |
|
728 | if not path: | |
729 | raise error.Abort(_('default repository not configured!'), |
|
729 | raise error.Abort(_('default repository not configured!'), | |
730 | hint=_("see 'hg help config.paths'")) |
|
730 | hint=_("see 'hg help config.paths'")) | |
731 | destpath = path.pushloc or path.loc |
|
731 | destpath = path.pushloc or path.loc | |
732 | # Remote scratch bookmarks will be deleted because remotenames doesn't |
|
732 | # Remote scratch bookmarks will be deleted because remotenames doesn't | |
733 | # know about them. Let's save them before the push and restore them after |

733 | # know about them. Let's save them before the push and restore them after | |
734 | remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath) |
|
734 | remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath) | |
735 | result = orig(ui, repo, dest, *args, **opts) |
|
735 | result = orig(ui, repo, dest, *args, **opts) | |
736 | if common.isremotebooksenabled(ui): |
|
736 | if common.isremotebooksenabled(ui): | |
737 | if bookmark and scratchpush: |
|
737 | if bookmark and scratchpush: | |
738 | other = hg.peer(repo, opts, destpath) |
|
738 | other = hg.peer(repo, opts, destpath) | |
739 | fetchedbookmarks = other.listkeyspatterns('bookmarks', |
|
739 | fetchedbookmarks = other.listkeyspatterns('bookmarks', | |
740 | patterns=[bookmark]) |
|
740 | patterns=[bookmark]) | |
741 | remotescratchbookmarks.update(fetchedbookmarks) |
|
741 | remotescratchbookmarks.update(fetchedbookmarks) | |
742 | _saveremotebookmarks(repo, remotescratchbookmarks, destpath) |
|
742 | _saveremotebookmarks(repo, remotescratchbookmarks, destpath) | |
743 | if oldphasemove: |
|
743 | if oldphasemove: | |
744 | exchange._localphasemove = oldphasemove |
|
744 | exchange._localphasemove = oldphasemove | |
745 | return result |
|
745 | return result | |
746 |
|
746 | |||
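The wrapper above leans on ui.configoverride to hand the bookmark name and the scratch-push flag down to the bundle2 part generator without threading them through every call. A minimal sketch of that mechanism, using a made-up option name purely for illustration (the real code uses the configbookmark and configscratchpush constants in the 'experimental' section):

    with ui.configoverride({('experimental', 'example-knob'): 'value'},
                           'infinitepush'):
        assert ui.config('experimental', 'example-knob') == 'value'
    # on exit from the with-block the previous value is restored automatically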
747 | def _deleteinfinitepushbookmarks(ui, repo, path, names): |
|
747 | def _deleteinfinitepushbookmarks(ui, repo, path, names): | |
748 | """Prune remote names by removing the bookmarks we don't want anymore, |
|
748 | """Prune remote names by removing the bookmarks we don't want anymore, | |
749 | then writing the result back to disk |
|
749 | then writing the result back to disk | |
750 | """ |
|
750 | """ | |
751 | remotenamesext = extensions.find('remotenames') |
|
751 | remotenamesext = extensions.find('remotenames') | |
752 |
|
752 | |||
753 | # remotename format is: |
|
753 | # remotename format is: | |
754 | # (node, nametype ("branches" or "bookmarks"), remote, name) |
|
754 | # (node, nametype ("branches" or "bookmarks"), remote, name) | |
755 | nametype_idx = 1 |
|
755 | nametype_idx = 1 | |
756 | remote_idx = 2 |
|
756 | remote_idx = 2 | |
757 | name_idx = 3 |
|
757 | name_idx = 3 | |
758 | remotenames = [remotename for remotename in \ |
|
758 | remotenames = [remotename for remotename in \ | |
759 | remotenamesext.readremotenames(repo) \ |
|
759 | remotenamesext.readremotenames(repo) \ | |
760 | if remotename[remote_idx] == path] |
|
760 | if remotename[remote_idx] == path] | |
761 | remote_bm_names = [remotename[name_idx] for remotename in \ |
|
761 | remote_bm_names = [remotename[name_idx] for remotename in \ | |
762 | remotenames if remotename[nametype_idx] == "bookmarks"] |
|
762 | remotenames if remotename[nametype_idx] == "bookmarks"] | |
763 |
|
763 | |||
764 | for name in names: |
|
764 | for name in names: | |
765 | if name not in remote_bm_names: |
|
765 | if name not in remote_bm_names: | |
766 | raise error.Abort(_("infinitepush bookmark '{}' does not exist " |
|
766 | raise error.Abort(_("infinitepush bookmark '{}' does not exist " | |
767 | "in path '{}'").format(name, path)) |
|
767 | "in path '{}'").format(name, path)) | |
768 |
|
768 | |||
769 | bookmarks = {} |
|
769 | bookmarks = {} | |
770 | branches = collections.defaultdict(list) |
|
770 | branches = collections.defaultdict(list) | |
771 | for node, nametype, remote, name in remotenames: |
|
771 | for node, nametype, remote, name in remotenames: | |
772 | if nametype == "bookmarks" and name not in names: |
|
772 | if nametype == "bookmarks" and name not in names: | |
773 | bookmarks[name] = node |
|
773 | bookmarks[name] = node | |
774 | elif nametype == "branches": |
|
774 | elif nametype == "branches": | |
775 | # saveremotenames wants binary nodes for branches |
|
775 | # saveremotenames wants binary nodes for branches | |
776 | branches[name].append(bin(node)) |
|
776 | branches[name].append(bin(node)) | |
777 |
|
777 | |||
778 | remotenamesext.saveremotenames(repo, path, branches, bookmarks) |
|
778 | remotenamesext.saveremotenames(repo, path, branches, bookmarks) | |
779 |
|
779 | |||
780 | def _phasemove(orig, pushop, nodes, phase=phases.public): |
|
780 | def _phasemove(orig, pushop, nodes, phase=phases.public): | |
781 | """prevent commits from being marked public |
|
781 | """prevent commits from being marked public | |
782 |
|
782 | |||
783 | Since these are going to a scratch branch, they aren't really being |
|
783 | Since these are going to a scratch branch, they aren't really being | |
784 | published.""" |
|
784 | published.""" | |
785 |
|
785 | |||
786 | if phase != phases.public: |
|
786 | if phase != phases.public: | |
787 | orig(pushop, nodes, phase) |
|
787 | orig(pushop, nodes, phase) | |
788 |
|
788 | |||
789 | @exchange.b2partsgenerator(scratchbranchparttype) |
|
789 | @exchange.b2partsgenerator(scratchbranchparttype) | |
790 | def partgen(pushop, bundler): |
|
790 | def partgen(pushop, bundler): | |
791 | bookmark = pushop.ui.config(experimental, configbookmark) |
|
791 | bookmark = pushop.ui.config(experimental, configbookmark) | |
792 | scratchpush = pushop.ui.configbool(experimental, configscratchpush) |
|
792 | scratchpush = pushop.ui.configbool(experimental, configscratchpush) | |
793 | if 'changesets' in pushop.stepsdone or not scratchpush: |
|
793 | if 'changesets' in pushop.stepsdone or not scratchpush: | |
794 | return |
|
794 | return | |
795 |
|
795 | |||
796 | if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote): |
|
796 | if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote): | |
797 | return |
|
797 | return | |
798 |
|
798 | |||
799 | pushop.stepsdone.add('changesets') |
|
799 | pushop.stepsdone.add('changesets') | |
800 | if not pushop.outgoing.missing: |
|
800 | if not pushop.outgoing.missing: | |
801 | pushop.ui.status(_('no changes found\n')) |
|
801 | pushop.ui.status(_('no changes found\n')) | |
802 | pushop.cgresult = 0 |
|
802 | pushop.cgresult = 0 | |
803 | return |
|
803 | return | |
804 |
|
804 | |||
805 | # This parameter tells the server that the following bundle is an |
|
805 | # This parameter tells the server that the following bundle is an | |
806 | # infinitepush. This lets it switch the part processing to our infinitepush |

806 | # infinitepush. This lets it switch the part processing to our infinitepush | |
807 | # code path. |
|
807 | # code path. | |
808 | bundler.addparam("infinitepush", "True") |
|
808 | bundler.addparam("infinitepush", "True") | |
809 |
|
809 | |||
810 | scratchparts = bundleparts.getscratchbranchparts(pushop.repo, |
|
810 | scratchparts = bundleparts.getscratchbranchparts(pushop.repo, | |
811 | pushop.remote, |
|
811 | pushop.remote, | |
812 | pushop.outgoing, |
|
812 | pushop.outgoing, | |
813 | pushop.ui, |
|
813 | pushop.ui, | |
814 | bookmark) |
|
814 | bookmark) | |
815 |
|
815 | |||
816 | for scratchpart in scratchparts: |
|
816 | for scratchpart in scratchparts: | |
817 | bundler.addpart(scratchpart) |
|
817 | bundler.addpart(scratchpart) | |
818 |
|
818 | |||
819 | def handlereply(op): |
|
819 | def handlereply(op): | |
820 | # server either succeeds or aborts; no code to read |
|
820 | # server either succeeds or aborts; no code to read | |
821 | pushop.cgresult = 1 |
|
821 | pushop.cgresult = 1 | |
822 |
|
822 | |||
823 | return handlereply |
|
823 | return handlereply | |
824 |
|
824 | |||
825 | bundle2.capabilities[bundleparts.scratchbranchparttype] = () |
|
825 | bundle2.capabilities[bundleparts.scratchbranchparttype] = () | |
826 |
|
826 | |||
827 | def _getrevs(bundle, oldnode, force, bookmark): |
|
827 | def _getrevs(bundle, oldnode, force, bookmark): | |
828 | 'extracts and validates the revs to be imported' |
|
828 | 'extracts and validates the revs to be imported' | |
829 | revs = [bundle[r] for r in bundle.revs('sort(bundle())')] |
|
829 | revs = [bundle[r] for r in bundle.revs('sort(bundle())')] | |
830 |
|
830 | |||
831 | # new bookmark |
|
831 | # new bookmark | |
832 | if oldnode is None: |
|
832 | if oldnode is None: | |
833 | return revs |
|
833 | return revs | |
834 |
|
834 | |||
835 | # Fast forward update |
|
835 | # Fast forward update | |
836 | if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)): |
|
836 | if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)): | |
837 | return revs |
|
837 | return revs | |
838 |
|
838 | |||
839 | return revs |
|
839 | return revs | |
840 |
|
840 | |||
841 | @contextlib.contextmanager |
|
841 | @contextlib.contextmanager | |
842 | def logservicecall(logger, service, **kwargs): |
|
842 | def logservicecall(logger, service, **kwargs): | |
843 | start = time.time() |
|
843 | start = time.time() | |
844 | logger(service, eventtype='start', **kwargs) |
|
844 | logger(service, eventtype='start', **kwargs) | |
845 | try: |
|
845 | try: | |
846 | yield |
|
846 | yield | |
847 | logger(service, eventtype='success', |
|
847 | logger(service, eventtype='success', | |
848 | elapsedms=(time.time() - start) * 1000, **kwargs) |
|
848 | elapsedms=(time.time() - start) * 1000, **kwargs) | |
849 | except Exception as e: |
|
849 | except Exception as e: | |
850 | logger(service, eventtype='failure', |
|
850 | logger(service, eventtype='failure', | |
851 | elapsedms=(time.time() - start) * 1000, errormsg=str(e), |
|
851 | elapsedms=(time.time() - start) * 1000, errormsg=str(e), | |
852 | **kwargs) |
|
852 | **kwargs) | |
853 | raise |
|
853 | raise | |
854 |
|
854 | |||
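A small, self-contained sketch of how the context manager above is used; the print-based logger here is only a stand-in for the functools.partial(ui.log, ...) object built by _getorcreateinfinitepushlogger below:

    def fakelogger(service, **kwargs):
        # stand-in for the real ui.log-based logger
        print(service, kwargs)

    with logservicecall(fakelogger, 'bundlestore', bundlesize=42):
        pass  # logs a 'start' event, then 'success' with an elapsedms field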
855 | def _getorcreateinfinitepushlogger(op): |
|
855 | def _getorcreateinfinitepushlogger(op): | |
856 | logger = op.records['infinitepushlogger'] |
|
856 | logger = op.records['infinitepushlogger'] | |
857 | if not logger: |
|
857 | if not logger: | |
858 | ui = op.repo.ui |
|
858 | ui = op.repo.ui | |
859 | try: |
|
859 | try: | |
860 | username = procutil.getuser() |
|
860 | username = procutil.getuser() | |
861 | except Exception: |
|
861 | except Exception: | |
862 | username = 'unknown' |
|
862 | username = 'unknown' | |
863 | # Generate random request id to be able to find all logged entries |
|
863 | # Generate random request id to be able to find all logged entries | |
864 | # for the same request. Since requestid is pseudo-randomly generated it may |

864 | # for the same request. Since requestid is pseudo-randomly generated it may | |
865 | # not be unique, but we assume that (hostname, username, requestid) |
|
865 | # not be unique, but we assume that (hostname, username, requestid) | |
866 | # is unique. |
|
866 | # is unique. | |
867 | random.seed() |
|
867 | random.seed() | |
868 | requestid = random.randint(0, 2000000000) |
|
868 | requestid = random.randint(0, 2000000000) | |
869 | hostname = socket.gethostname() |
|
869 | hostname = socket.gethostname() | |
870 | logger = functools.partial(ui.log, 'infinitepush', user=username, |
|
870 | logger = functools.partial(ui.log, 'infinitepush', user=username, | |
871 | requestid=requestid, hostname=hostname, |
|
871 | requestid=requestid, hostname=hostname, | |
872 | reponame=ui.config('infinitepush', |
|
872 | reponame=ui.config('infinitepush', | |
873 | 'reponame')) |
|
873 | 'reponame')) | |
874 | op.records.add('infinitepushlogger', logger) |
|
874 | op.records.add('infinitepushlogger', logger) | |
875 | else: |
|
875 | else: | |
876 | logger = logger[0] |
|
876 | logger = logger[0] | |
877 | return logger |
|
877 | return logger | |
878 |
|
878 | |||
879 | def storetobundlestore(orig, repo, op, unbundler): |
|
879 | def storetobundlestore(orig, repo, op, unbundler): | |
880 | """stores the incoming bundle from the push command in the bundlestore |

880 | """stores the incoming bundle from the push command in the bundlestore | |
881 | instead of applying it to the revlogs""" |

881 | instead of applying it to the revlogs""" | |
882 |
|
882 | |||
883 | repo.ui.status(_("storing changesets on the bundlestore\n")) |
|
883 | repo.ui.status(_("storing changesets on the bundlestore\n")) | |
884 | bundler = bundle2.bundle20(repo.ui) |
|
884 | bundler = bundle2.bundle20(repo.ui) | |
885 |
|
885 | |||
886 | # processing each part and storing it in bundler |
|
886 | # processing each part and storing it in bundler | |
887 | with bundle2.partiterator(repo, op, unbundler) as parts: |
|
887 | with bundle2.partiterator(repo, op, unbundler) as parts: | |
888 | for part in parts: |
|
888 | for part in parts: | |
889 | bundlepart = None |
|
889 | bundlepart = None | |
890 | if part.type == 'replycaps': |
|
890 | if part.type == 'replycaps': | |
891 | # This configures the current operation to allow reply parts. |
|
891 | # This configures the current operation to allow reply parts. | |
892 | bundle2._processpart(op, part) |
|
892 | bundle2._processpart(op, part) | |
893 | else: |
|
893 | else: | |
894 | bundlepart = bundle2.bundlepart(part.type, data=part.read()) |
|
894 | bundlepart = bundle2.bundlepart(part.type, data=part.read()) | |
895 | for key, value in part.params.iteritems(): |
|
895 | for key, value in part.params.iteritems(): | |
896 | bundlepart.addparam(key, value) |
|
896 | bundlepart.addparam(key, value) | |
897 |
|
897 | |||
898 | # Certain parts require a response |
|
898 | # Certain parts require a response | |
899 | if part.type in ('pushkey', 'changegroup'): |
|
899 | if part.type in ('pushkey', 'changegroup'): | |
900 | if op.reply is not None: |
|
900 | if op.reply is not None: | |
901 | rpart = op.reply.newpart('reply:%s' % part.type) |
|
901 | rpart = op.reply.newpart('reply:%s' % part.type) | |
902 | rpart.addparam('in-reply-to', str(part.id), |
|
902 | rpart.addparam('in-reply-to', str(part.id), | |
903 | mandatory=False) |
|
903 | mandatory=False) | |
904 | rpart.addparam('return', '1', mandatory=False) |
|
904 | rpart.addparam('return', '1', mandatory=False) | |
905 |
|
905 | |||
906 | op.records.add(part.type, { |
|
906 | op.records.add(part.type, { | |
907 | 'return': 1, |
|
907 | 'return': 1, | |
908 | }) |
|
908 | }) | |
909 | if bundlepart: |
|
909 | if bundlepart: | |
910 | bundler.addpart(bundlepart) |
|
910 | bundler.addpart(bundlepart) | |
911 |
|
911 | |||
912 | # storing the bundle in the bundlestore |
|
912 | # storing the bundle in the bundlestore | |
913 | buf = util.chunkbuffer(bundler.getchunks()) |
|
913 | buf = util.chunkbuffer(bundler.getchunks()) | |
914 | fd, bundlefile = tempfile.mkstemp() |
|
914 | fd, bundlefile = tempfile.mkstemp() | |
915 | try: |
|
915 | try: | |
916 | try: |
|
916 | try: | |
917 | fp = os.fdopen(fd, 'wb') |
|
917 | fp = os.fdopen(fd, r'wb') | |
918 | fp.write(buf.read()) |
|
918 | fp.write(buf.read()) | |
919 | finally: |
|
919 | finally: | |
920 | fp.close() |
|
920 | fp.close() | |
921 | storebundle(op, {}, bundlefile) |
|
921 | storebundle(op, {}, bundlefile) | |
922 | finally: |
|
922 | finally: | |
923 | try: |
|
923 | try: | |
924 | os.unlink(bundlefile) |
|
924 | os.unlink(bundlefile) | |
925 | except Exception: |
|
925 | except Exception: | |
926 | # we would rather see the original exception |
|
926 | # we would rather see the original exception | |
927 | pass |
|
927 | pass | |
928 |
|
928 | |||
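The temp-file handling above (repeated twice more below) always follows the same shape: write the rebuilt bundle to a temporary file, hand the file name to storebundle, and remove the file no matter what. A generic, standalone sketch of that pattern:

    import os
    import tempfile

    def writeandconsume(data, consume):
        fd, path = tempfile.mkstemp()
        try:
            with os.fdopen(fd, 'wb') as fp:
                fp.write(data)
            consume(path)  # e.g. storebundle(op, params, path)
        finally:
            try:
                os.unlink(path)
            except OSError:
                pass  # prefer surfacing the original exception, if any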
929 | def processparts(orig, repo, op, unbundler): |
|
929 | def processparts(orig, repo, op, unbundler): | |
930 |
|
930 | |||
931 | # make sure we don't wrap processparts in case of `hg unbundle` |
|
931 | # make sure we don't wrap processparts in case of `hg unbundle` | |
932 | if op.source == 'unbundle': |
|
932 | if op.source == 'unbundle': | |
933 | return orig(repo, op, unbundler) |
|
933 | return orig(repo, op, unbundler) | |
934 |
|
934 | |||
935 | # this server routes each push to the bundle store |

935 | # this server routes each push to the bundle store | |
936 | if repo.ui.configbool('infinitepush', 'pushtobundlestore'): |
|
936 | if repo.ui.configbool('infinitepush', 'pushtobundlestore'): | |
937 | return storetobundlestore(orig, repo, op, unbundler) |
|
937 | return storetobundlestore(orig, repo, op, unbundler) | |
938 |
|
938 | |||
939 | if unbundler.params.get('infinitepush') != 'True': |
|
939 | if unbundler.params.get('infinitepush') != 'True': | |
940 | return orig(repo, op, unbundler) |
|
940 | return orig(repo, op, unbundler) | |
941 |
|
941 | |||
942 | handleallparts = repo.ui.configbool('infinitepush', 'storeallparts') |
|
942 | handleallparts = repo.ui.configbool('infinitepush', 'storeallparts') | |
943 |
|
943 | |||
944 | bundler = bundle2.bundle20(repo.ui) |
|
944 | bundler = bundle2.bundle20(repo.ui) | |
945 | cgparams = None |
|
945 | cgparams = None | |
946 | with bundle2.partiterator(repo, op, unbundler) as parts: |
|
946 | with bundle2.partiterator(repo, op, unbundler) as parts: | |
947 | for part in parts: |
|
947 | for part in parts: | |
948 | bundlepart = None |
|
948 | bundlepart = None | |
949 | if part.type == 'replycaps': |
|
949 | if part.type == 'replycaps': | |
950 | # This configures the current operation to allow reply parts. |
|
950 | # This configures the current operation to allow reply parts. | |
951 | bundle2._processpart(op, part) |
|
951 | bundle2._processpart(op, part) | |
952 | elif part.type == bundleparts.scratchbranchparttype: |
|
952 | elif part.type == bundleparts.scratchbranchparttype: | |
953 | # Scratch branch parts need to be converted to normal |
|
953 | # Scratch branch parts need to be converted to normal | |
954 | # changegroup parts, and the extra parameters stored for later |
|
954 | # changegroup parts, and the extra parameters stored for later | |
955 | # when we upload to the store. Eventually those parameters will |
|
955 | # when we upload to the store. Eventually those parameters will | |
956 | # be put on the actual bundle instead of this part, then we can |
|
956 | # be put on the actual bundle instead of this part, then we can | |
957 | # send a vanilla changegroup instead of the scratchbranch part. |
|
957 | # send a vanilla changegroup instead of the scratchbranch part. | |
958 | cgversion = part.params.get('cgversion', '01') |
|
958 | cgversion = part.params.get('cgversion', '01') | |
959 | bundlepart = bundle2.bundlepart('changegroup', data=part.read()) |
|
959 | bundlepart = bundle2.bundlepart('changegroup', data=part.read()) | |
960 | bundlepart.addparam('version', cgversion) |
|
960 | bundlepart.addparam('version', cgversion) | |
961 | cgparams = part.params |
|
961 | cgparams = part.params | |
962 |
|
962 | |||
963 | # If we're not dumping all parts into the new bundle, we need to |
|
963 | # If we're not dumping all parts into the new bundle, we need to | |
964 | # alert the future pushkey and phase-heads handler to skip |
|
964 | # alert the future pushkey and phase-heads handler to skip | |
965 | # the part. |
|
965 | # the part. | |
966 | if not handleallparts: |
|
966 | if not handleallparts: | |
967 | op.records.add(scratchbranchparttype + '_skippushkey', True) |
|
967 | op.records.add(scratchbranchparttype + '_skippushkey', True) | |
968 | op.records.add(scratchbranchparttype + '_skipphaseheads', |
|
968 | op.records.add(scratchbranchparttype + '_skipphaseheads', | |
969 | True) |
|
969 | True) | |
970 | else: |
|
970 | else: | |
971 | if handleallparts: |
|
971 | if handleallparts: | |
972 | # Ideally we would not process any parts, and instead just |
|
972 | # Ideally we would not process any parts, and instead just | |
973 | # forward them to the bundle for storage, but since this |
|
973 | # forward them to the bundle for storage, but since this | |
974 | # differs from previous behavior, we need to put it behind a |
|
974 | # differs from previous behavior, we need to put it behind a | |
975 | # config flag for incremental rollout. |
|
975 | # config flag for incremental rollout. | |
976 | bundlepart = bundle2.bundlepart(part.type, data=part.read()) |
|
976 | bundlepart = bundle2.bundlepart(part.type, data=part.read()) | |
977 | for key, value in part.params.iteritems(): |
|
977 | for key, value in part.params.iteritems(): | |
978 | bundlepart.addparam(key, value) |
|
978 | bundlepart.addparam(key, value) | |
979 |
|
979 | |||
980 | # Certain parts require a response |
|
980 | # Certain parts require a response | |
981 | if part.type == 'pushkey': |
|
981 | if part.type == 'pushkey': | |
982 | if op.reply is not None: |
|
982 | if op.reply is not None: | |
983 | rpart = op.reply.newpart('reply:pushkey') |
|
983 | rpart = op.reply.newpart('reply:pushkey') | |
984 | rpart.addparam('in-reply-to', str(part.id), |
|
984 | rpart.addparam('in-reply-to', str(part.id), | |
985 | mandatory=False) |
|
985 | mandatory=False) | |
986 | rpart.addparam('return', '1', mandatory=False) |
|
986 | rpart.addparam('return', '1', mandatory=False) | |
987 | else: |
|
987 | else: | |
988 | bundle2._processpart(op, part) |
|
988 | bundle2._processpart(op, part) | |
989 |
|
989 | |||
990 | if handleallparts: |
|
990 | if handleallparts: | |
991 | op.records.add(part.type, { |
|
991 | op.records.add(part.type, { | |
992 | 'return': 1, |
|
992 | 'return': 1, | |
993 | }) |
|
993 | }) | |
994 | if bundlepart: |
|
994 | if bundlepart: | |
995 | bundler.addpart(bundlepart) |
|
995 | bundler.addpart(bundlepart) | |
996 |
|
996 | |||
997 | # If commits were sent, store them |
|
997 | # If commits were sent, store them | |
998 | if cgparams: |
|
998 | if cgparams: | |
999 | buf = util.chunkbuffer(bundler.getchunks()) |
|
999 | buf = util.chunkbuffer(bundler.getchunks()) | |
1000 | fd, bundlefile = tempfile.mkstemp() |
|
1000 | fd, bundlefile = tempfile.mkstemp() | |
1001 | try: |
|
1001 | try: | |
1002 | try: |
|
1002 | try: | |
1003 | fp = os.fdopen(fd, 'wb') |
|
1003 | fp = os.fdopen(fd, r'wb') | |
1004 | fp.write(buf.read()) |
|
1004 | fp.write(buf.read()) | |
1005 | finally: |
|
1005 | finally: | |
1006 | fp.close() |
|
1006 | fp.close() | |
1007 | storebundle(op, cgparams, bundlefile) |
|
1007 | storebundle(op, cgparams, bundlefile) | |
1008 | finally: |
|
1008 | finally: | |
1009 | try: |
|
1009 | try: | |
1010 | os.unlink(bundlefile) |
|
1010 | os.unlink(bundlefile) | |
1011 | except Exception: |
|
1011 | except Exception: | |
1012 | # we would rather see the original exception |
|
1012 | # we would rather see the original exception | |
1013 | pass |
|
1013 | pass | |
1014 |
|
1014 | |||
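Both knobs consulted in processparts above are ordinary server-side settings; a hedged hgrc sketch (the values are only examples):

    [infinitepush]
    # route every incoming push into the bundlestore instead of the revlogs
    pushtobundlestore = True
    # also forward non-changegroup parts (e.g. pushkey) into the stored bundle
    storeallparts = True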
1015 | def storebundle(op, params, bundlefile): |
|
1015 | def storebundle(op, params, bundlefile): | |
1016 | log = _getorcreateinfinitepushlogger(op) |
|
1016 | log = _getorcreateinfinitepushlogger(op) | |
1017 | parthandlerstart = time.time() |
|
1017 | parthandlerstart = time.time() | |
1018 | log(scratchbranchparttype, eventtype='start') |
|
1018 | log(scratchbranchparttype, eventtype='start') | |
1019 | index = op.repo.bundlestore.index |
|
1019 | index = op.repo.bundlestore.index | |
1020 | store = op.repo.bundlestore.store |
|
1020 | store = op.repo.bundlestore.store | |
1021 | op.records.add(scratchbranchparttype + '_skippushkey', True) |
|
1021 | op.records.add(scratchbranchparttype + '_skippushkey', True) | |
1022 |
|
1022 | |||
1023 | bundle = None |
|
1023 | bundle = None | |
1024 | try: # guards bundle |
|
1024 | try: # guards bundle | |
1025 | bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile) |
|
1025 | bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile) | |
1026 | bundle = hg.repository(op.repo.ui, bundlepath) |
|
1026 | bundle = hg.repository(op.repo.ui, bundlepath) | |
1027 |
|
1027 | |||
1028 | bookmark = params.get('bookmark') |
|
1028 | bookmark = params.get('bookmark') | |
1029 | bookprevnode = params.get('bookprevnode', '') |
|
1029 | bookprevnode = params.get('bookprevnode', '') | |
1030 | force = params.get('force') |
|
1030 | force = params.get('force') | |
1031 |
|
1031 | |||
1032 | if bookmark: |
|
1032 | if bookmark: | |
1033 | oldnode = index.getnode(bookmark) |
|
1033 | oldnode = index.getnode(bookmark) | |
1034 | else: |
|
1034 | else: | |
1035 | oldnode = None |
|
1035 | oldnode = None | |
1036 | bundleheads = bundle.revs('heads(bundle())') |
|
1036 | bundleheads = bundle.revs('heads(bundle())') | |
1037 | if bookmark and len(bundleheads) > 1: |
|
1037 | if bookmark and len(bundleheads) > 1: | |
1038 | raise error.Abort( |
|
1038 | raise error.Abort( | |
1039 | _('cannot push more than one head to a scratch branch')) |
|
1039 | _('cannot push more than one head to a scratch branch')) | |
1040 |
|
1040 | |||
1041 | revs = _getrevs(bundle, oldnode, force, bookmark) |
|
1041 | revs = _getrevs(bundle, oldnode, force, bookmark) | |
1042 |
|
1042 | |||
1043 | # Notify the user of what is being pushed |
|
1043 | # Notify the user of what is being pushed | |
1044 | plural = 's' if len(revs) > 1 else '' |
|
1044 | plural = 's' if len(revs) > 1 else '' | |
1045 | op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural)) |
|
1045 | op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural)) | |
1046 | maxoutput = 10 |
|
1046 | maxoutput = 10 | |
1047 | for i in range(0, min(len(revs), maxoutput)): |
|
1047 | for i in range(0, min(len(revs), maxoutput)): | |
1048 | firstline = bundle[revs[i]].description().split('\n')[0][:50] |
|
1048 | firstline = bundle[revs[i]].description().split('\n')[0][:50] | |
1049 | op.repo.ui.warn((" %s %s\n") % (revs[i], firstline)) |
|
1049 | op.repo.ui.warn((" %s %s\n") % (revs[i], firstline)) | |
1050 |
|
1050 | |||
1051 | if len(revs) > maxoutput + 1: |
|
1051 | if len(revs) > maxoutput + 1: | |
1052 | op.repo.ui.warn((" ...\n")) |
|
1052 | op.repo.ui.warn((" ...\n")) | |
1053 | firstline = bundle[revs[-1]].description().split('\n')[0][:50] |
|
1053 | firstline = bundle[revs[-1]].description().split('\n')[0][:50] | |
1054 | op.repo.ui.warn((" %s %s\n") % (revs[-1], firstline)) |
|
1054 | op.repo.ui.warn((" %s %s\n") % (revs[-1], firstline)) | |
1055 |
|
1055 | |||
1056 | nodesctx = [bundle[rev] for rev in revs] |
|
1056 | nodesctx = [bundle[rev] for rev in revs] | |
1057 | inindex = lambda rev: bool(index.getbundle(bundle[rev].hex())) |
|
1057 | inindex = lambda rev: bool(index.getbundle(bundle[rev].hex())) | |
1058 | if bundleheads: |
|
1058 | if bundleheads: | |
1059 | newheadscount = sum(not inindex(rev) for rev in bundleheads) |
|
1059 | newheadscount = sum(not inindex(rev) for rev in bundleheads) | |
1060 | else: |
|
1060 | else: | |
1061 | newheadscount = 0 |
|
1061 | newheadscount = 0 | |
1062 | # If there's a bookmark specified, there should be only one head, |
|
1062 | # If there's a bookmark specified, there should be only one head, | |
1063 | # so we choose the last node, which will be that head. |
|
1063 | # so we choose the last node, which will be that head. | |
1064 | # If a bug or malicious client allows there to be a bookmark |
|
1064 | # If a bug or malicious client allows there to be a bookmark | |
1065 | # with multiple heads, we will place the bookmark on the last head. |
|
1065 | # with multiple heads, we will place the bookmark on the last head. | |
1066 | bookmarknode = nodesctx[-1].hex() if nodesctx else None |
|
1066 | bookmarknode = nodesctx[-1].hex() if nodesctx else None | |
1067 | key = None |
|
1067 | key = None | |
1068 | if newheadscount: |
|
1068 | if newheadscount: | |
1069 | with open(bundlefile, 'r') as f: |
|
1069 | with open(bundlefile, 'r') as f: | |
1070 | bundledata = f.read() |
|
1070 | bundledata = f.read() | |
1071 | with logservicecall(log, 'bundlestore', |
|
1071 | with logservicecall(log, 'bundlestore', | |
1072 | bundlesize=len(bundledata)): |
|
1072 | bundlesize=len(bundledata)): | |
1073 | bundlesizelimit = 100 * 1024 * 1024 # 100 MB |
|
1073 | bundlesizelimit = 100 * 1024 * 1024 # 100 MB | |
1074 | if len(bundledata) > bundlesizelimit: |
|
1074 | if len(bundledata) > bundlesizelimit: | |
1075 | error_msg = ('bundle is too big: %d bytes. ' + |
|
1075 | error_msg = ('bundle is too big: %d bytes. ' + | |
1076 | 'max allowed size is 100 MB') |
|
1076 | 'max allowed size is 100 MB') | |
1077 | raise error.Abort(error_msg % (len(bundledata),)) |
|
1077 | raise error.Abort(error_msg % (len(bundledata),)) | |
1078 | key = store.write(bundledata) |
|
1078 | key = store.write(bundledata) | |
1079 |
|
1079 | |||
1080 | with logservicecall(log, 'index', newheadscount=newheadscount), index: |
|
1080 | with logservicecall(log, 'index', newheadscount=newheadscount), index: | |
1081 | if key: |
|
1081 | if key: | |
1082 | index.addbundle(key, nodesctx) |
|
1082 | index.addbundle(key, nodesctx) | |
1083 | if bookmark: |
|
1083 | if bookmark: | |
1084 | index.addbookmark(bookmark, bookmarknode) |
|
1084 | index.addbookmark(bookmark, bookmarknode) | |
1085 | _maybeaddpushbackpart(op, bookmark, bookmarknode, |
|
1085 | _maybeaddpushbackpart(op, bookmark, bookmarknode, | |
1086 | bookprevnode, params) |
|
1086 | bookprevnode, params) | |
1087 | log(scratchbranchparttype, eventtype='success', |
|
1087 | log(scratchbranchparttype, eventtype='success', | |
1088 | elapsedms=(time.time() - parthandlerstart) * 1000) |
|
1088 | elapsedms=(time.time() - parthandlerstart) * 1000) | |
1089 |
|
1089 | |||
1090 | except Exception as e: |
|
1090 | except Exception as e: | |
1091 | log(scratchbranchparttype, eventtype='failure', |
|
1091 | log(scratchbranchparttype, eventtype='failure', | |
1092 | elapsedms=(time.time() - parthandlerstart) * 1000, |
|
1092 | elapsedms=(time.time() - parthandlerstart) * 1000, | |
1093 | errormsg=str(e)) |
|
1093 | errormsg=str(e)) | |
1094 | raise |
|
1094 | raise | |
1095 | finally: |
|
1095 | finally: | |
1096 | if bundle: |
|
1096 | if bundle: | |
1097 | bundle.close() |
|
1097 | bundle.close() | |
1098 |
|
1098 | |||
1099 | @bundle2.parthandler(scratchbranchparttype, |
|
1099 | @bundle2.parthandler(scratchbranchparttype, | |
1100 | ('bookmark', 'bookprevnode', 'force', |
|
1100 | ('bookmark', 'bookprevnode', 'force', | |
1101 | 'pushbackbookmarks', 'cgversion')) |
|
1101 | 'pushbackbookmarks', 'cgversion')) | |
1102 | def bundle2scratchbranch(op, part): |
|
1102 | def bundle2scratchbranch(op, part): | |
1103 | '''unbundle a bundle2 part containing a changegroup to store''' |
|
1103 | '''unbundle a bundle2 part containing a changegroup to store''' | |
1104 |
|
1104 | |||
1105 | bundler = bundle2.bundle20(op.repo.ui) |
|
1105 | bundler = bundle2.bundle20(op.repo.ui) | |
1106 | cgversion = part.params.get('cgversion', '01') |
|
1106 | cgversion = part.params.get('cgversion', '01') | |
1107 | cgpart = bundle2.bundlepart('changegroup', data=part.read()) |
|
1107 | cgpart = bundle2.bundlepart('changegroup', data=part.read()) | |
1108 | cgpart.addparam('version', cgversion) |
|
1108 | cgpart.addparam('version', cgversion) | |
1109 | bundler.addpart(cgpart) |
|
1109 | bundler.addpart(cgpart) | |
1110 | buf = util.chunkbuffer(bundler.getchunks()) |
|
1110 | buf = util.chunkbuffer(bundler.getchunks()) | |
1111 |
|
1111 | |||
1112 | fd, bundlefile = tempfile.mkstemp() |
|
1112 | fd, bundlefile = tempfile.mkstemp() | |
1113 | try: |
|
1113 | try: | |
1114 | try: |
|
1114 | try: | |
1115 | fp = os.fdopen(fd, 'wb') |
|
1115 | fp = os.fdopen(fd, r'wb') | |
1116 | fp.write(buf.read()) |
|
1116 | fp.write(buf.read()) | |
1117 | finally: |
|
1117 | finally: | |
1118 | fp.close() |
|
1118 | fp.close() | |
1119 | storebundle(op, part.params, bundlefile) |
|
1119 | storebundle(op, part.params, bundlefile) | |
1120 | finally: |
|
1120 | finally: | |
1121 | try: |
|
1121 | try: | |
1122 | os.unlink(bundlefile) |
|
1122 | os.unlink(bundlefile) | |
1123 | except OSError as e: |
|
1123 | except OSError as e: | |
1124 | if e.errno != errno.ENOENT: |
|
1124 | if e.errno != errno.ENOENT: | |
1125 | raise |
|
1125 | raise | |
1126 |
|
1126 | |||
1127 | return 1 |
|
1127 | return 1 | |
1128 |
|
1128 | |||
1129 | def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params): |
|
1129 | def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params): | |
1130 | if params.get('pushbackbookmarks'): |
|
1130 | if params.get('pushbackbookmarks'): | |
1131 | if op.reply and 'pushback' in op.reply.capabilities: |
|
1131 | if op.reply and 'pushback' in op.reply.capabilities: | |
1132 | params = { |
|
1132 | params = { | |
1133 | 'namespace': 'bookmarks', |
|
1133 | 'namespace': 'bookmarks', | |
1134 | 'key': bookmark, |
|
1134 | 'key': bookmark, | |
1135 | 'new': newnode, |
|
1135 | 'new': newnode, | |
1136 | 'old': oldnode, |
|
1136 | 'old': oldnode, | |
1137 | } |
|
1137 | } | |
1138 | op.reply.newpart('pushkey', mandatoryparams=params.iteritems()) |
|
1138 | op.reply.newpart('pushkey', mandatoryparams=params.iteritems()) | |
1139 |
|
1139 | |||
1140 | def bundle2pushkey(orig, op, part): |
|
1140 | def bundle2pushkey(orig, op, part): | |
1141 | '''Wrapper of bundle2.handlepushkey() |
|
1141 | '''Wrapper of bundle2.handlepushkey() | |
1142 |
|
1142 | |||
1143 | The only goal is to skip calling the original function if the flag is set. |

1143 | The only goal is to skip calling the original function if the flag is set. | |
1144 | The flag is set when an infinitepush push is happening. |

1144 | The flag is set when an infinitepush push is happening. | |
1145 | ''' |
|
1145 | ''' | |
1146 | if op.records[scratchbranchparttype + '_skippushkey']: |
|
1146 | if op.records[scratchbranchparttype + '_skippushkey']: | |
1147 | if op.reply is not None: |
|
1147 | if op.reply is not None: | |
1148 | rpart = op.reply.newpart('reply:pushkey') |
|
1148 | rpart = op.reply.newpart('reply:pushkey') | |
1149 | rpart.addparam('in-reply-to', str(part.id), mandatory=False) |
|
1149 | rpart.addparam('in-reply-to', str(part.id), mandatory=False) | |
1150 | rpart.addparam('return', '1', mandatory=False) |
|
1150 | rpart.addparam('return', '1', mandatory=False) | |
1151 | return 1 |
|
1151 | return 1 | |
1152 |
|
1152 | |||
1153 | return orig(op, part) |
|
1153 | return orig(op, part) | |
1154 |
|
1154 | |||
1155 | def bundle2handlephases(orig, op, part): |
|
1155 | def bundle2handlephases(orig, op, part): | |
1156 | '''Wrapper of bundle2.handlephases() |
|
1156 | '''Wrapper of bundle2.handlephases() | |
1157 |
|
1157 | |||
1158 | The only goal is to skip calling the original function if the flag is set. |

1158 | The only goal is to skip calling the original function if the flag is set. | |
1159 | The flag is set when an infinitepush push is happening. |

1159 | The flag is set when an infinitepush push is happening. | |
1160 | ''' |
|
1160 | ''' | |
1161 |
|
1161 | |||
1162 | if op.records[scratchbranchparttype + '_skipphaseheads']: |
|
1162 | if op.records[scratchbranchparttype + '_skipphaseheads']: | |
1163 | return |
|
1163 | return | |
1164 |
|
1164 | |||
1165 | return orig(op, part) |
|
1165 | return orig(op, part) | |
1166 |
|
1166 | |||
1167 | def _asyncsavemetadata(root, nodes): |
|
1167 | def _asyncsavemetadata(root, nodes): | |
1168 | '''starts a separate process that fills metadata for the nodes |
|
1168 | '''starts a separate process that fills metadata for the nodes | |
1169 |
|
1169 | |||
1170 | This function creates a separate process and doesn't wait for its |

1170 | This function creates a separate process and doesn't wait for its | |
1171 | completion. This was done to avoid slowing down pushes |
|
1171 | completion. This was done to avoid slowing down pushes | |
1172 | ''' |
|
1172 | ''' | |
1173 |
|
1173 | |||
1174 | maxnodes = 50 |
|
1174 | maxnodes = 50 | |
1175 | if len(nodes) > maxnodes: |
|
1175 | if len(nodes) > maxnodes: | |
1176 | return |
|
1176 | return | |
1177 | nodesargs = [] |
|
1177 | nodesargs = [] | |
1178 | for node in nodes: |
|
1178 | for node in nodes: | |
1179 | nodesargs.append('--node') |
|
1179 | nodesargs.append('--node') | |
1180 | nodesargs.append(node) |
|
1180 | nodesargs.append(node) | |
1181 | with open(os.devnull, 'w+b') as devnull: |
|
1181 | with open(os.devnull, 'w+b') as devnull: | |
1182 | cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata', |
|
1182 | cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata', | |
1183 | '-R', root] + nodesargs |
|
1183 | '-R', root] + nodesargs | |
1184 | # Process will run in background. We don't care about the return code |
|
1184 | # Process will run in background. We don't care about the return code | |
1185 | subprocess.Popen(cmdline, close_fds=True, shell=False, |
|
1185 | subprocess.Popen(cmdline, close_fds=True, shell=False, | |
1186 | stdin=devnull, stdout=devnull, stderr=devnull) |
|
1186 | stdin=devnull, stdout=devnull, stderr=devnull) |
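_asyncsavemetadata deliberately fires and forgets: stdio is detached and the child is never waited on, so the push returns immediately. A stripped-down, runnable sketch of the same pattern with a harmless placeholder command instead of the real debugfillinfinitepushmetadata invocation:

    import os
    import subprocess

    with open(os.devnull, 'w+b') as devnull:
        # detach all stdio and do not wait(); the child outlives this block
        subprocess.Popen(['sleep', '1'], close_fds=True, shell=False,
                         stdin=devnull, stdout=devnull, stderr=devnull)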
@@ -1,1814 +1,1814 b'' | |||||
1 | # subrepo.py - sub-repository classes and factory |
|
1 | # subrepo.py - sub-repository classes and factory | |
2 | # |
|
2 | # | |
3 | # Copyright 2009-2010 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2009-2010 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import copy |
|
10 | import copy | |
11 | import errno |
|
11 | import errno | |
12 | import hashlib |
|
12 | import hashlib | |
13 | import os |
|
13 | import os | |
14 | import posixpath |
|
14 | import posixpath | |
15 | import re |
|
15 | import re | |
16 | import stat |
|
16 | import stat | |
17 | import subprocess |
|
17 | import subprocess | |
18 | import sys |
|
18 | import sys | |
19 | import tarfile |
|
19 | import tarfile | |
20 | import xml.dom.minidom |
|
20 | import xml.dom.minidom | |
21 |
|
21 | |||
22 | from .i18n import _ |
|
22 | from .i18n import _ | |
23 | from . import ( |
|
23 | from . import ( | |
24 | cmdutil, |
|
24 | cmdutil, | |
25 | encoding, |
|
25 | encoding, | |
26 | error, |
|
26 | error, | |
27 | exchange, |
|
27 | exchange, | |
28 | logcmdutil, |
|
28 | logcmdutil, | |
29 | match as matchmod, |
|
29 | match as matchmod, | |
30 | node, |
|
30 | node, | |
31 | pathutil, |
|
31 | pathutil, | |
32 | phases, |
|
32 | phases, | |
33 | pycompat, |
|
33 | pycompat, | |
34 | scmutil, |
|
34 | scmutil, | |
35 | subrepoutil, |
|
35 | subrepoutil, | |
36 | util, |
|
36 | util, | |
37 | vfs as vfsmod, |
|
37 | vfs as vfsmod, | |
38 | ) |
|
38 | ) | |
39 | from .utils import ( |
|
39 | from .utils import ( | |
40 | dateutil, |
|
40 | dateutil, | |
41 | procutil, |
|
41 | procutil, | |
42 | stringutil, |
|
42 | stringutil, | |
43 | ) |
|
43 | ) | |
44 |
|
44 | |||
45 | hg = None |
|
45 | hg = None | |
46 | reporelpath = subrepoutil.reporelpath |
|
46 | reporelpath = subrepoutil.reporelpath | |
47 | subrelpath = subrepoutil.subrelpath |
|
47 | subrelpath = subrepoutil.subrelpath | |
48 | _abssource = subrepoutil._abssource |
|
48 | _abssource = subrepoutil._abssource | |
49 | propertycache = util.propertycache |
|
49 | propertycache = util.propertycache | |
50 |
|
50 | |||
51 | def _expandedabspath(path): |
|
51 | def _expandedabspath(path): | |
52 | ''' |
|
52 | ''' | |
53 | get a path or URL; if it is a path, expand it and return an absolute path |

53 | get a path or URL; if it is a path, expand it and return an absolute path | |
54 | ''' |
|
54 | ''' | |
55 | expandedpath = util.urllocalpath(util.expandpath(path)) |
|
55 | expandedpath = util.urllocalpath(util.expandpath(path)) | |
56 | u = util.url(expandedpath) |
|
56 | u = util.url(expandedpath) | |
57 | if not u.scheme: |
|
57 | if not u.scheme: | |
58 | path = util.normpath(os.path.abspath(u.path)) |
|
58 | path = util.normpath(os.path.abspath(u.path)) | |
59 | return path |
|
59 | return path | |
60 |
|
60 | |||
61 | def _getstorehashcachename(remotepath): |
|
61 | def _getstorehashcachename(remotepath): | |
62 | '''get a unique filename for the store hash cache of a remote repository''' |
|
62 | '''get a unique filename for the store hash cache of a remote repository''' | |
63 | return node.hex(hashlib.sha1(_expandedabspath(remotepath)).digest())[0:12] |
|
63 | return node.hex(hashlib.sha1(_expandedabspath(remotepath)).digest())[0:12] | |
64 |
|
64 | |||
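Together these two helpers turn any remote location into a short, stable cache key: local paths are expanded and absolutized, URLs with a scheme pass through untouched, and the result is SHA-1 hashed and truncated. A rough standalone equivalent of the hashing step (the path below is just an example of an already-expanded remote path):

    import hashlib

    remotepath = b'/home/someuser/repos/project'  # already expanded/normalized
    cachename = hashlib.sha1(remotepath).hexdigest()[:12]
    print(cachename)  # 12-character name of the store hash cache file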
65 | class SubrepoAbort(error.Abort): |
|
65 | class SubrepoAbort(error.Abort): | |
66 | """Exception class used to avoid handling a subrepo error more than once""" |
|
66 | """Exception class used to avoid handling a subrepo error more than once""" | |
67 | def __init__(self, *args, **kw): |
|
67 | def __init__(self, *args, **kw): | |
68 | self.subrepo = kw.pop(r'subrepo', None) |
|
68 | self.subrepo = kw.pop(r'subrepo', None) | |
69 | self.cause = kw.pop(r'cause', None) |
|
69 | self.cause = kw.pop(r'cause', None) | |
70 | error.Abort.__init__(self, *args, **kw) |
|
70 | error.Abort.__init__(self, *args, **kw) | |
71 |
|
71 | |||
72 | def annotatesubrepoerror(func): |
|
72 | def annotatesubrepoerror(func): | |
73 | def decoratedmethod(self, *args, **kargs): |
|
73 | def decoratedmethod(self, *args, **kargs): | |
74 | try: |
|
74 | try: | |
75 | res = func(self, *args, **kargs) |
|
75 | res = func(self, *args, **kargs) | |
76 | except SubrepoAbort as ex: |
|
76 | except SubrepoAbort as ex: | |
77 | # This exception has already been handled |
|
77 | # This exception has already been handled | |
78 | raise ex |
|
78 | raise ex | |
79 | except error.Abort as ex: |
|
79 | except error.Abort as ex: | |
80 | subrepo = subrelpath(self) |
|
80 | subrepo = subrelpath(self) | |
81 | errormsg = (stringutil.forcebytestr(ex) + ' ' |
|
81 | errormsg = (stringutil.forcebytestr(ex) + ' ' | |
82 | + _('(in subrepository "%s")') % subrepo) |
|
82 | + _('(in subrepository "%s")') % subrepo) | |
83 | # avoid handling this exception by raising a SubrepoAbort exception |
|
83 | # avoid handling this exception by raising a SubrepoAbort exception | |
84 | raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo, |
|
84 | raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo, | |
85 | cause=sys.exc_info()) |
|
85 | cause=sys.exc_info()) | |
86 | return res |
|
86 | return res | |
87 | return decoratedmethod |
|
87 | return decoratedmethod | |
88 |
|
88 | |||
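annotatesubrepoerror is meant to wrap concrete subrepo methods so that an error.Abort raised inside a subrepo operation is re-raised exactly once as SubrepoAbort with the subrepo path appended to the message. A hypothetical usage sketch (the class and method here are illustrative, not the module's real implementations):

    class examplesubrepo(abstractsubrepo):
        @annotatesubrepoerror
        def commit(self, text, user, date):
            # any Abort raised here reaches the caller as
            # SubrepoAbort('... (in subrepository "<path>")')
            raise error.Abort(_('simulated failure'))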
89 | def _updateprompt(ui, sub, dirty, local, remote): |
|
89 | def _updateprompt(ui, sub, dirty, local, remote): | |
90 | if dirty: |
|
90 | if dirty: | |
91 | msg = (_(' subrepository sources for %s differ\n' |
|
91 | msg = (_(' subrepository sources for %s differ\n' | |
92 | 'use (l)ocal source (%s) or (r)emote source (%s)?' |
|
92 | 'use (l)ocal source (%s) or (r)emote source (%s)?' | |
93 | '$$ &Local $$ &Remote') |
|
93 | '$$ &Local $$ &Remote') | |
94 | % (subrelpath(sub), local, remote)) |
|
94 | % (subrelpath(sub), local, remote)) | |
95 | else: |
|
95 | else: | |
96 | msg = (_(' subrepository sources for %s differ (in checked out ' |
|
96 | msg = (_(' subrepository sources for %s differ (in checked out ' | |
97 | 'version)\n' |
|
97 | 'version)\n' | |
98 | 'use (l)ocal source (%s) or (r)emote source (%s)?' |
|
98 | 'use (l)ocal source (%s) or (r)emote source (%s)?' | |
99 | '$$ &Local $$ &Remote') |
|
99 | '$$ &Local $$ &Remote') | |
100 | % (subrelpath(sub), local, remote)) |
|
100 | % (subrelpath(sub), local, remote)) | |
101 | return ui.promptchoice(msg, 0) |
|
101 | return ui.promptchoice(msg, 0) | |
102 |
|
102 | |||
103 | def _sanitize(ui, vfs, ignore): |
|
103 | def _sanitize(ui, vfs, ignore): | |
104 | for dirname, dirs, names in vfs.walk(): |
|
104 | for dirname, dirs, names in vfs.walk(): | |
105 | for i, d in enumerate(dirs): |
|
105 | for i, d in enumerate(dirs): | |
106 | if d.lower() == ignore: |
|
106 | if d.lower() == ignore: | |
107 | del dirs[i] |
|
107 | del dirs[i] | |
108 | break |
|
108 | break | |
109 | if vfs.basename(dirname).lower() != '.hg': |
|
109 | if vfs.basename(dirname).lower() != '.hg': | |
110 | continue |
|
110 | continue | |
111 | for f in names: |
|
111 | for f in names: | |
112 | if f.lower() == 'hgrc': |
|
112 | if f.lower() == 'hgrc': | |
113 | ui.warn(_("warning: removing potentially hostile 'hgrc' " |
|
113 | ui.warn(_("warning: removing potentially hostile 'hgrc' " | |
114 | "in '%s'\n") % vfs.join(dirname)) |
|
114 | "in '%s'\n") % vfs.join(dirname)) | |
115 | vfs.unlink(vfs.reljoin(dirname, f)) |
|
115 | vfs.unlink(vfs.reljoin(dirname, f)) | |
116 |
|
116 | |||
117 | def _auditsubrepopath(repo, path): |
|
117 | def _auditsubrepopath(repo, path): | |
118 | # auditor doesn't check if the path itself is a symlink |
|
118 | # auditor doesn't check if the path itself is a symlink | |
119 | pathutil.pathauditor(repo.root)(path) |
|
119 | pathutil.pathauditor(repo.root)(path) | |
120 | if repo.wvfs.islink(path): |
|
120 | if repo.wvfs.islink(path): | |
121 | raise error.Abort(_("subrepo '%s' traverses symbolic link") % path) |
|
121 | raise error.Abort(_("subrepo '%s' traverses symbolic link") % path) | |
122 |
|
122 | |||
123 | SUBREPO_ALLOWED_DEFAULTS = { |
|
123 | SUBREPO_ALLOWED_DEFAULTS = { | |
124 | 'hg': True, |
|
124 | 'hg': True, | |
125 | 'git': False, |
|
125 | 'git': False, | |
126 | 'svn': False, |
|
126 | 'svn': False, | |
127 | } |
|
127 | } | |
128 |
|
128 | |||
129 | def _checktype(ui, kind): |
|
129 | def _checktype(ui, kind): | |
130 | # subrepos.allowed is a master kill switch. If disabled, subrepos are |
|
130 | # subrepos.allowed is a master kill switch. If disabled, subrepos are | |
131 | # disabled period. |
|
131 | # disabled period. | |
132 | if not ui.configbool('subrepos', 'allowed', True): |
|
132 | if not ui.configbool('subrepos', 'allowed', True): | |
133 | raise error.Abort(_('subrepos not enabled'), |
|
133 | raise error.Abort(_('subrepos not enabled'), | |
134 | hint=_("see 'hg help config.subrepos' for details")) |
|
134 | hint=_("see 'hg help config.subrepos' for details")) | |
135 |
|
135 | |||
136 | default = SUBREPO_ALLOWED_DEFAULTS.get(kind, False) |
|
136 | default = SUBREPO_ALLOWED_DEFAULTS.get(kind, False) | |
137 | if not ui.configbool('subrepos', '%s:allowed' % kind, default): |
|
137 | if not ui.configbool('subrepos', '%s:allowed' % kind, default): | |
138 | raise error.Abort(_('%s subrepos not allowed') % kind, |
|
138 | raise error.Abort(_('%s subrepos not allowed') % kind, | |
139 | hint=_("see 'hg help config.subrepos' for details")) |
|
139 | hint=_("see 'hg help config.subrepos' for details")) | |
140 |
|
140 | |||
141 | if kind not in types: |
|
141 | if kind not in types: | |
142 | raise error.Abort(_('unknown subrepo type %s') % kind) |
|
142 | raise error.Abort(_('unknown subrepo type %s') % kind) | |
143 |
|
143 | |||
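_checktype consults two layers of configuration: the master 'subrepos.allowed' switch and a per-type '<kind>:allowed' switch whose default comes from SUBREPO_ALLOWED_DEFAULTS. A hedged hgrc sketch that would permit hg and git subrepos while leaving svn disabled:

    [subrepos]
    allowed = true       # master switch; if false, every subrepo type is refused
    hg:allowed = true    # matches the built-in default
    git:allowed = true   # default is false, so this must be enabled explicitly
    svn:allowed = false  # default is false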
144 | def subrepo(ctx, path, allowwdir=False, allowcreate=True): |
|
144 | def subrepo(ctx, path, allowwdir=False, allowcreate=True): | |
145 | """return instance of the right subrepo class for subrepo in path""" |
|
145 | """return instance of the right subrepo class for subrepo in path""" | |
146 | # subrepo inherently violates our import layering rules |
|
146 | # subrepo inherently violates our import layering rules | |
147 | # because it wants to make repo objects from deep inside the stack |
|
147 | # because it wants to make repo objects from deep inside the stack | |
148 | # so we manually delay the circular imports to not break |
|
148 | # so we manually delay the circular imports to not break | |
149 | # scripts that don't use our demand-loading |
|
149 | # scripts that don't use our demand-loading | |
150 | global hg |
|
150 | global hg | |
151 | from . import hg as h |
|
151 | from . import hg as h | |
152 | hg = h |
|
152 | hg = h | |
153 |
|
153 | |||
154 | repo = ctx.repo() |
|
154 | repo = ctx.repo() | |
155 | _auditsubrepopath(repo, path) |
|
155 | _auditsubrepopath(repo, path) | |
156 | state = ctx.substate[path] |
|
156 | state = ctx.substate[path] | |
157 | _checktype(repo.ui, state[2]) |
|
157 | _checktype(repo.ui, state[2]) | |
158 | if allowwdir: |
|
158 | if allowwdir: | |
159 | state = (state[0], ctx.subrev(path), state[2]) |
|
159 | state = (state[0], ctx.subrev(path), state[2]) | |
160 | return types[state[2]](ctx, path, state[:2], allowcreate) |
|
160 | return types[state[2]](ctx, path, state[:2], allowcreate) | |
161 |
|
161 | |||
162 | def nullsubrepo(ctx, path, pctx): |
|
162 | def nullsubrepo(ctx, path, pctx): | |
163 | """return an empty subrepo in pctx for the extant subrepo in ctx""" |
|
163 | """return an empty subrepo in pctx for the extant subrepo in ctx""" | |
164 | # subrepo inherently violates our import layering rules |
|
164 | # subrepo inherently violates our import layering rules | |
165 | # because it wants to make repo objects from deep inside the stack |
|
165 | # because it wants to make repo objects from deep inside the stack | |
166 | # so we manually delay the circular imports to not break |
|
166 | # so we manually delay the circular imports to not break | |
167 | # scripts that don't use our demand-loading |
|
167 | # scripts that don't use our demand-loading | |
168 | global hg |
|
168 | global hg | |
169 | from . import hg as h |
|
169 | from . import hg as h | |
170 | hg = h |
|
170 | hg = h | |
171 |
|
171 | |||
172 | repo = ctx.repo() |
|
172 | repo = ctx.repo() | |
173 | _auditsubrepopath(repo, path) |
|
173 | _auditsubrepopath(repo, path) | |
174 | state = ctx.substate[path] |
|
174 | state = ctx.substate[path] | |
175 | _checktype(repo.ui, state[2]) |
|
175 | _checktype(repo.ui, state[2]) | |
176 | subrev = '' |
|
176 | subrev = '' | |
177 | if state[2] == 'hg': |
|
177 | if state[2] == 'hg': | |
178 | subrev = "0" * 40 |
|
178 | subrev = "0" * 40 | |
179 | return types[state[2]](pctx, path, (state[0], subrev), True) |
|
179 | return types[state[2]](pctx, path, (state[0], subrev), True) | |
180 |
|
180 | |||
181 | # subrepo classes need to implement the following abstract class: |
|
181 | # subrepo classes need to implement the following abstract class: | |
182 |
|
182 | |||
183 | class abstractsubrepo(object): |
|
183 | class abstractsubrepo(object): | |
184 |
|
184 | |||
185 | def __init__(self, ctx, path): |
|
185 | def __init__(self, ctx, path): | |
186 | """Initialize abstractsubrepo part |
|
186 | """Initialize abstractsubrepo part | |
187 |
|
187 | |||
188 | ``ctx`` is the context referring to this subrepository in the |

188 | ``ctx`` is the context referring to this subrepository in the | |
189 | parent repository. |
|
189 | parent repository. | |
190 |
|
190 | |||
191 | ``path`` is the path to this subrepository as seen from |
|
191 | ``path`` is the path to this subrepository as seen from | |
192 | the innermost repository. |

192 | the innermost repository. | |
193 | """ |
|
193 | """ | |
194 | self.ui = ctx.repo().ui |
|
194 | self.ui = ctx.repo().ui | |
195 | self._ctx = ctx |
|
195 | self._ctx = ctx | |
196 | self._path = path |
|
196 | self._path = path | |
197 |
|
197 | |||
198 | def addwebdirpath(self, serverpath, webconf): |
|
198 | def addwebdirpath(self, serverpath, webconf): | |
199 | """Add the hgwebdir entries for this subrepo, and any of its subrepos. |
|
199 | """Add the hgwebdir entries for this subrepo, and any of its subrepos. | |
200 |
|
200 | |||
201 | ``serverpath`` is the path component of the URL for this repo. |
|
201 | ``serverpath`` is the path component of the URL for this repo. | |
202 |
|
202 | |||
203 | ``webconf`` is the dictionary of hgwebdir entries. |
|
203 | ``webconf`` is the dictionary of hgwebdir entries. | |
204 | """ |
|
204 | """ | |
205 | pass |
|
205 | pass | |
206 |
|
206 | |||
207 | def storeclean(self, path): |
|
207 | def storeclean(self, path): | |
208 | """ |
|
208 | """ | |
209 | returns true if the repository has not changed since it was last |
|
209 | returns true if the repository has not changed since it was last | |
210 | cloned from or pushed to a given repository. |
|
210 | cloned from or pushed to a given repository. | |
211 | """ |
|
211 | """ | |
212 | return False |
|
212 | return False | |
213 |
|
213 | |||
214 | def dirty(self, ignoreupdate=False, missing=False): |
|
214 | def dirty(self, ignoreupdate=False, missing=False): | |
215 | """returns true if the dirstate of the subrepo is dirty or does not |
|
215 | """returns true if the dirstate of the subrepo is dirty or does not | |
216 | match the current stored state. If ignoreupdate is true, only check |

216 | match the current stored state. If ignoreupdate is true, only check | |
217 | whether the subrepo has uncommitted changes in its dirstate. If missing |
|
217 | whether the subrepo has uncommitted changes in its dirstate. If missing | |
218 | is true, check for deleted files. |
|
218 | is true, check for deleted files. | |
219 | """ |
|
219 | """ | |
220 | raise NotImplementedError |
|
220 | raise NotImplementedError | |
221 |
|
221 | |||
222 | def dirtyreason(self, ignoreupdate=False, missing=False): |
|
222 | def dirtyreason(self, ignoreupdate=False, missing=False): | |
223 | """return reason string if it is ``dirty()`` |
|
223 | """return reason string if it is ``dirty()`` | |
224 |
|
224 | |||
225 | The returned string should have enough information for the message |

225 | The returned string should have enough information for the message | |
226 | of the exception. |

226 | of the exception. | |
227 |
|
227 | |||
228 | Otherwise, this returns None. |

228 | Otherwise, this returns None. | |
229 | """ |
|
229 | """ | |
230 | if self.dirty(ignoreupdate=ignoreupdate, missing=missing): |
|
230 | if self.dirty(ignoreupdate=ignoreupdate, missing=missing): | |
231 | return _('uncommitted changes in subrepository "%s"' |
|
231 | return _('uncommitted changes in subrepository "%s"' | |
232 | ) % subrelpath(self) |
|
232 | ) % subrelpath(self) | |
233 |
|
233 | |||
234 | def bailifchanged(self, ignoreupdate=False, hint=None): |
|
234 | def bailifchanged(self, ignoreupdate=False, hint=None): | |
235 | """raise Abort if subrepository is ``dirty()`` |
|
235 | """raise Abort if subrepository is ``dirty()`` | |
236 | """ |
|
236 | """ | |
237 | dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate, |
|
237 | dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate, | |
238 | missing=True) |
|
238 | missing=True) | |
239 | if dirtyreason: |
|
239 | if dirtyreason: | |
240 | raise error.Abort(dirtyreason, hint=hint) |
|
240 | raise error.Abort(dirtyreason, hint=hint) | |
241 |
|
241 | |||
    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, prefix, explicitonly, **opts):
        return []

    def addremove(self, matcher, prefix, opts):
        self.ui.warn("%s: %s" % (prefix, _("addremove is not supported")))
        return 1

    def cat(self, match, fm, fntemplate, prefix, **opts):
        return 1

    def status(self, rev2, **opts):
        return scmutil.status([], [], [], [], [], [], [])

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        pass

    def outgoing(self, ui, dest, opts):
        return 1

    def incoming(self, ui, source, opts):
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name, decode):
        """return file data, optionally passed through repo decoders"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def getfileset(self, expr):
        """Resolve the fileset expression for this repo"""
        return set()

    def printfiles(self, ui, m, fm, fmt, subrepos):
        """handle the files command for this subrepo"""
        return 1

    def archive(self, archiver, prefix, match=None, decode=True):
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        self.ui.progress(_('archiving (%s)') % relpath, 0,
                         unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            mode = 'x' in flags and 0o755 or 0o644
            symlink = 'l' in flags
            archiver.addfile(prefix + self._path + '/' + name,
                             mode, symlink, self.filedata(name, decode))
            self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                             unit=_('files'), total=total)
        self.ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''

    def forget(self, match, prefix, dryrun):
        return ([], [])

    def removefiles(self, matcher, prefix, after, force, subrepos,
                    dryrun, warnings):
        """remove the matched files from the subrepository and the filesystem,
        possibly by force and/or after the file has been removed from the
        filesystem. Return 0 on success, 1 on any warning.
        """
        warnings.append(_("warning: removefiles not implemented (%s)")
                        % self._path)
        return 1

    def revert(self, substate, *pats, **opts):
        self.ui.warn(_('%s: reverting %s subrepos is unsupported\n')
                     % (substate[0], substate[2]))
        return []

    def shortid(self, revid):
        return revid

    def unshare(self):
        '''
        convert this repository from shared to normal storage.
        '''

    def verify(self):
        '''verify the integrity of the repository. Return 0 on success or
        warning, 1 on any error.
        '''
        return 0

    @propertycache
    def wvfs(self):
        """return vfs to access the working directory of this subrepository
        """
        return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path))

    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path)

class hgsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state, allowcreate):
        super(hgsubrepo, self).__init__(ctx, path)
        self._state = state
        r = ctx.repo()
        root = r.wjoin(path)
        create = allowcreate and not r.wvfs.exists('%s/.hg' % path)
        self._repo = hg.repository(r.baseui, root, create=create)

        # Propagate the parent's --hidden option
        if r is r.unfiltered():
            self._repo = self._repo.unfiltered()

        self.ui = self._repo.ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self.ui.setconfig(s, k, v, 'subrepo')
        # internal config: ui._usedassubrepo
        self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
        self._initrepo(r, state[0], create)

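    # Note on the config loop in __init__ above: the parent repository's
    # ui.commitsubrepos value is copied onto this subrepo's ui, so a parent
    # configured, for example, with
    #
    #   [ui]
    #   commitsubrepos = yes
    #
    # carries that behaviour into commands that recurse here. (The hgrc
    # snippet is purely illustrative of that one option.)
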
    @annotatesubrepoerror
    def addwebdirpath(self, serverpath, webconf):
        cmdutil.addwebdirpath(self._repo, subrelpath(self), webconf)

    def storeclean(self, path):
        with self._repo.lock():
            return self._storeclean(path)

    def _storeclean(self, path):
        clean = True
        itercache = self._calcstorehash(path)
        for filehash in self._readstorehashcache(path):
            if filehash != next(itercache, None):
                clean = False
                break
        if clean:
            # if not empty:
            # the cached and current pull states have a different size
            clean = next(itercache, None) is None
        return clean

    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to detect when there are changes that may
        require a push to a given remote path.'''
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        yield '# %s\n' % _expandedabspath(remotepath)
        vfs = self._repo.vfs
        for relname in filelist:
            filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest())
            yield '%s = %s\n' % (relname, filehash)

    @propertycache
    def _cachestorehashvfs(self):
        return vfsmod.vfs(self._repo.vfs.join('cache/storehash'))

    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = _getstorehashcachename(remotepath)
        return self._cachestorehashvfs.tryreadlines(cachefile, 'r')

    def _cachestorehash(self, remotepath):
        '''cache the current store hash

        Each remote repo requires its own store hash cache, because a subrepo
        store may be "clean" versus a given remote repo, but not versus another
        '''
        cachefile = _getstorehashcachename(remotepath)
        with self._repo.lock():
            storehash = list(self._calcstorehash(remotepath))
            vfs = self._cachestorehashvfs
            vfs.writelines(cachefile, storehash, mode='wb', notindexed=True)

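    # The lines yielded by _calcstorehash() above are written verbatim into
    # the per-remote cache file, so an entry looks roughly like this (the
    # hashes are shortened here purely for illustration):
    #
    #   # /path/to/remote/repo
    #   bookmarks = da39a3ee...
    #   store/phaseroots = da39a3ee...
    #   store/00changelog.i = da39a3ee...
    #
    # _storeclean() then compares the cached lines against a freshly computed
    # iterator, line by line.
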
    def _getctx(self):
        '''fetch the context for this subrepo revision, possibly a workingctx
        '''
        if self._ctx.rev() is None:
            return self._repo[None]  # workingctx if parent is workingctx
        else:
            rev = self._state[1]
            return self._repo[rev]

    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            lines = ['[paths]\n']

            def addpathconfig(key, value):
                if value:
                    lines.append('%s = %s\n' % (key, value))
                    self.ui.setconfig('paths', key, value, 'subrepo')

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)

            self._repo.vfs.write('hgrc', util.tonativeeol(''.join(lines)))

    @annotatesubrepoerror
    def add(self, ui, match, prefix, explicitonly, **opts):
        return cmdutil.add(ui, self._repo, match,
                           self.wvfs.reljoin(prefix, self._path),
                           explicitonly, **opts)

    @annotatesubrepoerror
    def addremove(self, m, prefix, opts):
        # In the same way as sub directories are processed, once in a subrepo,
        # always enter any of its subrepos. Don't corrupt the options that will
        # be used to process sibling subrepos however.
        opts = copy.copy(opts)
        opts['subrepos'] = True
        return scmutil.addremove(self._repo, m,
                                 self.wvfs.reljoin(prefix, self._path), opts)

    @annotatesubrepoerror
    def cat(self, match, fm, fntemplate, prefix, **opts):
        rev = self._state[1]
        ctx = self._repo[rev]
        return cmdutil.cat(self.ui, self._repo, ctx, match, fm, fntemplate,
                           prefix, **opts)

    @annotatesubrepoerror
    def status(self, rev2, **opts):
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError as inst:
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))
            return scmutil.status([], [], [], [], [], [], [])

    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            logcmdutil.diffordiffstat(ui, self._repo, diffopts,
                                      node1, node2, match,
                                      prefix=posixpath.join(prefix, self._path),
                                      listsubrepos=True, **opts)
        except error.RepoLookupError as inst:
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))

    @annotatesubrepoerror
    def archive(self, archiver, prefix, match=None, decode=True):
        self._get(self._state + ('hg',))
        files = self.files()
        if match:
            files = [f for f in files if match(f)]
        rev = self._state[1]
        ctx = self._repo[rev]
        scmutil.fileprefetchhooks(self._repo, ctx, files)
        total = abstractsubrepo.archive(self, archiver, prefix, match)
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath, True)
            submatch = matchmod.subdirmatcher(subpath, match)
            total += s.archive(archiver, prefix + self._path + '/', submatch,
                               decode)
        return total

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False, missing=False):
        r = self._state[1]
        if r == '' and not ignoreupdate:  # no state recorded
            return True
        w = self._repo[None]
        if r != w.p1().hex() and not ignoreupdate:
            # different version checked out
            return True
        return w.dirty(missing=missing)  # working directory changed

    def basestate(self):
        return self._repo['.'].hex()

    def checknested(self, path):
        return self._repo._checknested(self._repo.wjoin(path))

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex()  # different version checked out
        return node.hex(n)

    @annotatesubrepoerror
    def phase(self, state):
        return self._repo[state or '.'].phase()

    @annotatesubrepoerror
    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        source, revision, kind = state
        parentrepo = self._repo._subparent

        if revision in self._repo.unfiltered():
            # Allow shared subrepos tracked at null to setup the sharedpath
            if len(self._repo) != 0 or not parentrepo.shared():
                return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            # use self._repo.vfs instead of self.wvfs to remove .hg only
            self._repo.vfs.rmtree()

            # A remote subrepo could be shared if there is a local copy
            # relative to the parent's share source. But clone pooling doesn't
            # assemble the repos in a tree, so that can't be consistently done.
            # A simpler option is for the user to configure clone pooling, and
            # work with that.
            if parentrepo.shared() and hg.islocal(srcurl):
                self.ui.status(_('sharing subrepo %s from %s\n')
                               % (subrelpath(self), srcurl))
                shared = hg.share(self._repo._subparent.baseui,
                                  other, self._repo.root,
                                  update=False, bookmarks=False)
                self._repo = shared.local()
            else:
                # TODO: find a common place for this and this code in the
                # share.py wrap of the clone command.
                if parentrepo.shared():
                    pool = self.ui.config('share', 'pool')
                    if pool:
                        pool = util.expandpath(pool)

                    shareopts = {
                        'pool': pool,
                        'mode': self.ui.config('share', 'poolnaming'),
                    }
                else:
                    shareopts = {}

                self.ui.status(_('cloning subrepo %s from %s\n')
                               % (subrelpath(self), srcurl))
                other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                         other, self._repo.root,
                                         update=False, shareopts=shareopts)
                self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self.ui.status(_('pulling subrepo %s from %s\n')
                           % (subrelpath(self), srcurl))
            cleansub = self.storeclean(srcurl)
            exchange.pull(self._repo, other)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False

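    # When the parent repository is itself a share, the clone performed in
    # _get() above honours the standard clone-pooling options read from the
    # configuration, e.g.:
    #
    #   [share]
    #   pool = /path/to/pool
    #   poolnaming = identity
    #
    # (The path and naming mode shown are only illustrative values.)
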
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                urepo.ui.warn(
                    _('revision %s in subrepository "%s" is hidden\n')
                    % (revision[0:12], self._path))
                repo = urepo
        hg.updaterepo(repo, revision, overwrite)

    @annotatesubrepoerror
    def merge(self, state):
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            if anc == cur and dst.branch() == cur.branch():
                self.ui.debug('updating subrepository "%s"\n'
                              % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self.ui.debug('skipping subrepository "%s"\n'
                              % subrelpath(self))
            else:
                self.ui.debug('merging subrepository "%s"\n' % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                if _updateprompt(self.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()

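    # Summary of mergefunc() above: when the target revision descends from the
    # current one on the same branch (anc == cur), a plain update is enough;
    # when the target is an ancestor of the current revision (anc == dst), the
    # subrepo is already ahead and nothing happens; otherwise a real merge is
    # performed.
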
    @annotatesubrepoerror
    def push(self, opts):
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['.']
        subs = c.substate  # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                self.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self.ui.status(_('pushing subrepo %s to %s\n') %
                       (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = exchange.push(self._repo, other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res.cgresult

    @annotatesubrepoerror
    def outgoing(self, ui, dest, opts):
        if 'rev' in opts or 'branch' in opts:
            opts = copy.copy(opts)
            opts.pop('rev', None)
            opts.pop('branch', None)
        return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)

    @annotatesubrepoerror
    def incoming(self, ui, source, opts):
        if 'rev' in opts or 'branch' in opts:
            opts = copy.copy(opts)
            opts.pop('rev', None)
            opts.pop('branch', None)
        return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)

    @annotatesubrepoerror
    def files(self):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.manifest().keys()

    def filedata(self, name, decode):
        rev = self._state[1]
        data = self._repo[rev][name].data()
        if decode:
            data = self._repo.wwritedata(name, data)
        return data

    def fileflags(self, name):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.flags(name)

    @annotatesubrepoerror
    def printfiles(self, ui, m, fm, fmt, subrepos):
        # If the parent context is a workingctx, use the workingctx here for
        # consistency.
        if self._ctx.rev() is None:
            ctx = self._repo[None]
        else:
            rev = self._state[1]
            ctx = self._repo[rev]
        return cmdutil.files(ui, ctx, m, fm, fmt, subrepos)

    @annotatesubrepoerror
    def getfileset(self, expr):
        if self._ctx.rev() is None:
            ctx = self._repo[None]
        else:
            rev = self._state[1]
            ctx = self._repo[rev]

        files = ctx.getfileset(expr)

        for subpath in ctx.substate:
            sub = ctx.sub(subpath)

            try:
                files.extend(subpath + '/' + f for f in sub.getfileset(expr))
            except error.LookupError:
                self.ui.status(_("skipping missing subrepository: %s\n")
                               % self.wvfs.reljoin(reporelpath(self), subpath))
        return files

    def walk(self, match):
        ctx = self._repo[None]
        return ctx.walk(match)

    @annotatesubrepoerror
    def forget(self, match, prefix, dryrun):
        return cmdutil.forget(self.ui, self._repo, match,
                              self.wvfs.reljoin(prefix, self._path),
                              True, dryrun=dryrun)

    @annotatesubrepoerror
    def removefiles(self, matcher, prefix, after, force, subrepos,
                    dryrun, warnings):
        return cmdutil.remove(self.ui, self._repo, matcher,
                              self.wvfs.reljoin(prefix, self._path),
                              after, force, subrepos, dryrun)

    @annotatesubrepoerror
    def revert(self, substate, *pats, **opts):
        # reverting a subrepo is a 2 step process:
        # 1. if no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        self.ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get(r'no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts[r'date'] = None
            opts[r'rev'] = substate[1]

            self.filerevert(*pats, **opts)

        # Update the repo to the revision specified in the given substate
        if not opts.get(r'dry_run'):
            self.get(substate, overwrite=True)

    def filerevert(self, *pats, **opts):
        ctx = self._repo[opts[r'rev']]
        parents = self._repo.dirstate.parents()
        if opts.get(r'all'):
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)

    def shortid(self, revid):
        return revid[:12]

    @annotatesubrepoerror
    def unshare(self):
        # subrepo inherently violates our import layering rules
        # because it wants to make repo objects from deep inside the stack
        # so we manually delay the circular imports to not break
        # scripts that don't use our demand-loading
        global hg
        from . import hg as h
        hg = h

        # Nothing prevents a user from sharing in a repo, and then making that a
        # subrepo. Alternately, the previous unshare attempt may have failed
        # part way through. So recurse whether or not this layer is shared.
        if self._repo.shared():
            self.ui.status(_("unsharing subrepo '%s'\n") % self._relpath)

        hg.unshare(self.ui, self._repo)

    def verify(self):
        try:
            rev = self._state[1]
            ctx = self._repo.unfiltered()[rev]
            if ctx.hidden():
                # Since hidden revisions aren't pushed/pulled, it seems worth an
                # explicit warning.
                ui = self._repo.ui
                ui.warn(_("subrepo '%s' is hidden in revision %s\n") %
                        (self._relpath, node.short(self._ctx.node())))
            return 0
        except error.RepoLookupError:
            # A missing subrepo revision may be a case of needing to pull it, so
            # don't treat this as an error.
            self._repo.ui.warn(_("subrepo '%s' not found in revision %s\n") %
                               (self._relpath, node.short(self._ctx.node())))
            return 0

    @propertycache
    def wvfs(self):
        """return own wvfs for efficiency and consistency
        """
        return self._repo.wvfs

    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        # Keep consistent dir separators by avoiding vfs.join(self._path)
        return reporelpath(self._repo)

class svnsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state, allowcreate):
        super(svnsubrepo, self).__init__(ctx, path)
        self._state = state
        self._exe = procutil.findexe('svn')
        if not self._exe:
            raise error.Abort(_("'svn' executable not found for subrepo '%s'")
                              % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        cmd = [self._exe]
        extrakw = {}
        if not self.ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw[r'stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = self.wvfs.reljoin(self._ctx.repo().origroot,
                                     self._path, filename)
            cmd.append(path)
        env = dict(encoding.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=procutil.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise error.Abort(stderr or 'exited with code %d'
                                  % p.returncode)
            if stderr:
                self.ui.warn(stderr + '\n')
        return stdout, stderr

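    # _svncommand() is invoked throughout this class with a list of svn
    # arguments and an optional filename, e.g. self._svncommand(['info',
    # '--xml']) as used by _wcrevs() below, and returns the captured
    # (stdout, stderr) pair of the svn process.
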
    @propertycache
    def _svnversion(self):
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(br'^(\d+)\.(\d+)', output)
        if not m:
            raise error.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _svnmissing(self):
        return not self.wvfs.exists('.svn')

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + pycompat.ossep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

1018 | @annotatesubrepoerror |
|
1018 | @annotatesubrepoerror | |
1019 | def dirty(self, ignoreupdate=False, missing=False): |
|
1019 | def dirty(self, ignoreupdate=False, missing=False): | |
1020 | if self._svnmissing(): |
|
1020 | if self._svnmissing(): | |
1021 | return self._state[1] != '' |
|
1021 | return self._state[1] != '' | |
1022 | wcchanged = self._wcchanged() |
|
1022 | wcchanged = self._wcchanged() | |
1023 | changed = wcchanged[0] or (missing and wcchanged[2]) |
|
1023 | changed = wcchanged[0] or (missing and wcchanged[2]) | |
1024 | if not changed: |
|
1024 | if not changed: | |
1025 | if self._state[1] in self._wcrevs() or ignoreupdate: |
|
1025 | if self._state[1] in self._wcrevs() or ignoreupdate: | |
1026 | return False |
|
1026 | return False | |
1027 | return True |
|
1027 | return True | |
1028 |
|
1028 | |||
1029 | def basestate(self): |
|
1029 | def basestate(self): | |
1030 | lastrev, rev = self._wcrevs() |
|
1030 | lastrev, rev = self._wcrevs() | |
1031 | if lastrev != rev: |
|
1031 | if lastrev != rev: | |
1032 | # Last committed rev is not the same as rev. We would |
|
1032 | # Last committed rev is not the same as rev. We would | |
1033 | # like to take lastrev but we do not know if the subrepo |
|
1033 | # like to take lastrev but we do not know if the subrepo | |
1034 | # URL exists at lastrev. Test it and fall back to rev if it |
|
1034 | # URL exists at lastrev. Test it and fall back to rev if it | |
1035 | # is not there. |
|
1035 | # is not there. | |
1036 | try: |
|
1036 | try: | |
1037 | self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)]) |
|
1037 | self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)]) | |
1038 | return lastrev |
|
1038 | return lastrev | |
1039 | except error.Abort: |
|
1039 | except error.Abort: | |
1040 | pass |
|
1040 | pass | |
1041 | return rev |
|
1041 | return rev | |
1042 |
|
1042 | |||
1043 | @annotatesubrepoerror |
|
1043 | @annotatesubrepoerror | |
1044 | def commit(self, text, user, date): |
|
1044 | def commit(self, text, user, date): | |
1045 | # user and date are out of our hands since svn is centralized |
|
1045 | # user and date are out of our hands since svn is centralized | |
1046 | changed, extchanged, missing = self._wcchanged() |
|
1046 | changed, extchanged, missing = self._wcchanged() | |
1047 | if not changed: |
|
1047 | if not changed: | |
1048 | return self.basestate() |
|
1048 | return self.basestate() | |
1049 | if extchanged: |
|
1049 | if extchanged: | |
1050 | # Do not try to commit externals |
|
1050 | # Do not try to commit externals | |
1051 | raise error.Abort(_('cannot commit svn externals')) |
|
1051 | raise error.Abort(_('cannot commit svn externals')) | |
1052 | if missing: |
|
1052 | if missing: | |
1053 | # svn can commit with missing entries but aborting like hg |
|
1053 | # svn can commit with missing entries but aborting like hg | |
1054 | # seems a better approach. |
|
1054 | # seems a better approach. | |
1055 | raise error.Abort(_('cannot commit missing svn entries')) |
|
1055 | raise error.Abort(_('cannot commit missing svn entries')) | |
1056 | commitinfo, err = self._svncommand(['commit', '-m', text]) |
|
1056 | commitinfo, err = self._svncommand(['commit', '-m', text]) | |
1057 | self.ui.status(commitinfo) |
|
1057 | self.ui.status(commitinfo) | |
1058 | newrev = re.search('Committed revision ([0-9]+).', commitinfo) |
|
1058 | newrev = re.search('Committed revision ([0-9]+).', commitinfo) | |
1059 | if not newrev: |
|
1059 | if not newrev: | |
1060 | if not commitinfo.strip(): |
|
1060 | if not commitinfo.strip(): | |
1061 | # Sometimes, our definition of "changed" differs from |
|
1061 | # Sometimes, our definition of "changed" differs from | |
1062 | # svn's. For instance, svn ignores missing files |
|
1062 | # svn's. For instance, svn ignores missing files | |
1063 | # when committing. If there are only missing files, no |
|
1063 | # when committing. If there are only missing files, no | |
1064 | # commit is made, no output and no error code. |
|
1064 | # commit is made, no output and no error code. | |
1065 | raise error.Abort(_('failed to commit svn changes')) |
|
1065 | raise error.Abort(_('failed to commit svn changes')) | |
1066 | raise error.Abort(commitinfo.splitlines()[-1]) |
|
1066 | raise error.Abort(commitinfo.splitlines()[-1]) | |
1067 | newrev = newrev.groups()[0] |
|
1067 | newrev = newrev.groups()[0] | |
1068 | self.ui.status(self._svncommand(['update', '-r', newrev])[0]) |
|
1068 | self.ui.status(self._svncommand(['update', '-r', newrev])[0]) | |
1069 | return newrev |
|
1069 | return newrev | |
1070 |
|
1070 | |||
1071 | @annotatesubrepoerror |
|
1071 | @annotatesubrepoerror | |
1072 | def remove(self): |
|
1072 | def remove(self): | |
1073 | if self.dirty(): |
|
1073 | if self.dirty(): | |
1074 | self.ui.warn(_('not removing repo %s because ' |
|
1074 | self.ui.warn(_('not removing repo %s because ' | |
1075 | 'it has changes.\n') % self._path) |
|
1075 | 'it has changes.\n') % self._path) | |
1076 | return |
|
1076 | return | |
1077 | self.ui.note(_('removing subrepo %s\n') % self._path) |
|
1077 | self.ui.note(_('removing subrepo %s\n') % self._path) | |
1078 |
|
1078 | |||
1079 | self.wvfs.rmtree(forcibly=True) |
|
1079 | self.wvfs.rmtree(forcibly=True) | |
1080 | try: |
|
1080 | try: | |
1081 | pwvfs = self._ctx.repo().wvfs |
|
1081 | pwvfs = self._ctx.repo().wvfs | |
1082 | pwvfs.removedirs(pwvfs.dirname(self._path)) |
|
1082 | pwvfs.removedirs(pwvfs.dirname(self._path)) | |
1083 | except OSError: |
|
1083 | except OSError: | |
1084 | pass |
|
1084 | pass | |
1085 |
|
1085 | |||
1086 | @annotatesubrepoerror |
|
1086 | @annotatesubrepoerror | |
1087 | def get(self, state, overwrite=False): |
|
1087 | def get(self, state, overwrite=False): | |
1088 | if overwrite: |
|
1088 | if overwrite: | |
1089 | self._svncommand(['revert', '--recursive']) |
|
1089 | self._svncommand(['revert', '--recursive']) | |
1090 | args = ['checkout'] |
|
1090 | args = ['checkout'] | |
1091 | if self._svnversion >= (1, 5): |
|
1091 | if self._svnversion >= (1, 5): | |
1092 | args.append('--force') |
|
1092 | args.append('--force') | |
1093 | # The revision must be specified at the end of the URL to properly |
|
1093 | # The revision must be specified at the end of the URL to properly | |
1094 | # update to a directory which has since been deleted and recreated. |
|
1094 | # update to a directory which has since been deleted and recreated. | |
1095 | args.append('%s@%s' % (state[0], state[1])) |
|
1095 | args.append('%s@%s' % (state[0], state[1])) | |
1096 |
|
1096 | |||
1097 | # SEC: check that the ssh url is safe |
|
1097 | # SEC: check that the ssh url is safe | |
1098 | util.checksafessh(state[0]) |
|
1098 | util.checksafessh(state[0]) | |
1099 |
|
1099 | |||
1100 | status, err = self._svncommand(args, failok=True) |
|
1100 | status, err = self._svncommand(args, failok=True) | |
1101 | _sanitize(self.ui, self.wvfs, '.svn') |
|
1101 | _sanitize(self.ui, self.wvfs, '.svn') | |
1102 | if not re.search('Checked out revision [0-9]+.', status): |
|
1102 | if not re.search('Checked out revision [0-9]+.', status): | |
1103 | if ('is already a working copy for a different URL' in err |
|
1103 | if ('is already a working copy for a different URL' in err | |
1104 | and (self._wcchanged()[:2] == (False, False))): |
|
1104 | and (self._wcchanged()[:2] == (False, False))): | |
1105 | # obstructed but clean working copy, so just blow it away. |
|
1105 | # obstructed but clean working copy, so just blow it away. | |
1106 | self.remove() |
|
1106 | self.remove() | |
1107 | self.get(state, overwrite=False) |
|
1107 | self.get(state, overwrite=False) | |
1108 | return |
|
1108 | return | |
1109 | raise error.Abort((status or err).splitlines()[-1]) |
|
1109 | raise error.Abort((status or err).splitlines()[-1]) | |
1110 | self.ui.status(status) |
|
1110 | self.ui.status(status) | |
1111 |
|
1111 | |||
1112 | @annotatesubrepoerror |
|
1112 | @annotatesubrepoerror | |
1113 | def merge(self, state): |
|
1113 | def merge(self, state): | |
1114 | old = self._state[1] |
|
1114 | old = self._state[1] | |
1115 | new = state[1] |
|
1115 | new = state[1] | |
1116 | wcrev = self._wcrev() |
|
1116 | wcrev = self._wcrev() | |
1117 | if new != wcrev: |
|
1117 | if new != wcrev: | |
1118 | dirty = old == wcrev or self._wcchanged()[0] |
|
1118 | dirty = old == wcrev or self._wcchanged()[0] | |
1119 | if _updateprompt(self.ui, self, dirty, wcrev, new): |
|
1119 | if _updateprompt(self.ui, self, dirty, wcrev, new): | |
1120 | self.get(state, False) |
|
1120 | self.get(state, False) | |
1121 |
|
1121 | |||
1122 | def push(self, opts): |
|
1122 | def push(self, opts): | |
1123 | # push is a no-op for SVN |
|
1123 | # push is a no-op for SVN | |
1124 | return True |
|
1124 | return True | |
1125 |
|
1125 | |||
1126 | @annotatesubrepoerror |
|
1126 | @annotatesubrepoerror | |
1127 | def files(self): |
|
1127 | def files(self): | |
1128 | output = self._svncommand(['list', '--recursive', '--xml'])[0] |
|
1128 | output = self._svncommand(['list', '--recursive', '--xml'])[0] | |
1129 | doc = xml.dom.minidom.parseString(output) |
|
1129 | doc = xml.dom.minidom.parseString(output) | |
1130 | paths = [] |
|
1130 | paths = [] | |
1131 | for e in doc.getElementsByTagName('entry'): |
|
1131 | for e in doc.getElementsByTagName('entry'): | |
1132 | kind = pycompat.bytestr(e.getAttribute('kind')) |
|
1132 | kind = pycompat.bytestr(e.getAttribute('kind')) | |
1133 | if kind != 'file': |
|
1133 | if kind != 'file': | |
1134 | continue |
|
1134 | continue | |
1135 | name = ''.join(c.data for c |
|
1135 | name = ''.join(c.data for c | |
1136 | in e.getElementsByTagName('name')[0].childNodes |
|
1136 | in e.getElementsByTagName('name')[0].childNodes | |
1137 | if c.nodeType == c.TEXT_NODE) |
|
1137 | if c.nodeType == c.TEXT_NODE) | |
1138 | paths.append(name.encode('utf-8')) |
|
1138 | paths.append(name.encode('utf-8')) | |
1139 | return paths |
|
1139 | return paths | |
1140 |
|
1140 | |||
1141 | def filedata(self, name, decode): |
|
1141 | def filedata(self, name, decode): | |
1142 | return self._svncommand(['cat'], name)[0] |
|
1142 | return self._svncommand(['cat'], name)[0] | |
1143 |
|
1143 | |||
1144 |
|
1144 | |||
1145 | class gitsubrepo(abstractsubrepo): |
|
1145 | class gitsubrepo(abstractsubrepo): | |
1146 | def __init__(self, ctx, path, state, allowcreate): |
|
1146 | def __init__(self, ctx, path, state, allowcreate): | |
1147 | super(gitsubrepo, self).__init__(ctx, path) |
|
1147 | super(gitsubrepo, self).__init__(ctx, path) | |
1148 | self._state = state |
|
1148 | self._state = state | |
1149 | self._abspath = ctx.repo().wjoin(path) |
|
1149 | self._abspath = ctx.repo().wjoin(path) | |
1150 | self._subparent = ctx.repo() |
|
1150 | self._subparent = ctx.repo() | |
1151 | self._ensuregit() |
|
1151 | self._ensuregit() | |
1152 |
|
1152 | |||
1153 | def _ensuregit(self): |
|
1153 | def _ensuregit(self): | |
1154 | try: |
|
1154 | try: | |
1155 | self._gitexecutable = 'git' |
|
1155 | self._gitexecutable = 'git' | |
1156 | out, err = self._gitnodir(['--version']) |
|
1156 | out, err = self._gitnodir(['--version']) | |
1157 | except OSError as e: |
|
1157 | except OSError as e: | |
1158 | genericerror = _("error executing git for subrepo '%s': %s") |
|
1158 | genericerror = _("error executing git for subrepo '%s': %s") | |
1159 | notfoundhint = _("check git is installed and in your PATH") |
|
1159 | notfoundhint = _("check git is installed and in your PATH") | |
1160 | if e.errno != errno.ENOENT: |
|
1160 | if e.errno != errno.ENOENT: | |
1161 | raise error.Abort(genericerror % ( |
|
1161 | raise error.Abort(genericerror % ( | |
1162 | self._path, encoding.strtolocal(e.strerror))) |
|
1162 | self._path, encoding.strtolocal(e.strerror))) | |
1163 | elif pycompat.iswindows: |
|
1163 | elif pycompat.iswindows: | |
1164 | try: |
|
1164 | try: | |
1165 | self._gitexecutable = 'git.cmd' |
|
1165 | self._gitexecutable = 'git.cmd' | |
1166 | out, err = self._gitnodir(['--version']) |
|
1166 | out, err = self._gitnodir(['--version']) | |
1167 | except OSError as e2: |
|
1167 | except OSError as e2: | |
1168 | if e2.errno == errno.ENOENT: |
|
1168 | if e2.errno == errno.ENOENT: | |
1169 | raise error.Abort(_("couldn't find 'git' or 'git.cmd'" |
|
1169 | raise error.Abort(_("couldn't find 'git' or 'git.cmd'" | |
1170 | " for subrepo '%s'") % self._path, |
|
1170 | " for subrepo '%s'") % self._path, | |
1171 | hint=notfoundhint) |
|
1171 | hint=notfoundhint) | |
1172 | else: |
|
1172 | else: | |
1173 | raise error.Abort(genericerror % (self._path, |
|
1173 | raise error.Abort(genericerror % (self._path, | |
1174 | encoding.strtolocal(e2.strerror))) |
|
1174 | encoding.strtolocal(e2.strerror))) | |
1175 | else: |
|
1175 | else: | |
1176 | raise error.Abort(_("couldn't find git for subrepo '%s'") |
|
1176 | raise error.Abort(_("couldn't find git for subrepo '%s'") | |
1177 | % self._path, hint=notfoundhint) |
|
1177 | % self._path, hint=notfoundhint) | |
1178 | versionstatus = self._checkversion(out) |
|
1178 | versionstatus = self._checkversion(out) | |
1179 | if versionstatus == 'unknown': |
|
1179 | if versionstatus == 'unknown': | |
1180 | self.ui.warn(_('cannot retrieve git version\n')) |
|
1180 | self.ui.warn(_('cannot retrieve git version\n')) | |
1181 | elif versionstatus == 'abort': |
|
1181 | elif versionstatus == 'abort': | |
1182 | raise error.Abort(_('git subrepo requires at least 1.6.0 or later')) |
|
1182 | raise error.Abort(_('git subrepo requires at least 1.6.0 or later')) | |
1183 | elif versionstatus == 'warning': |
|
1183 | elif versionstatus == 'warning': | |
1184 | self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n')) |
|
1184 | self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n')) | |
1185 |
|
1185 | |||
1186 | @staticmethod |
|
1186 | @staticmethod | |
1187 | def _gitversion(out): |
|
1187 | def _gitversion(out): | |
1188 | m = re.search(br'^git version (\d+)\.(\d+)\.(\d+)', out) |
|
1188 | m = re.search(br'^git version (\d+)\.(\d+)\.(\d+)', out) | |
1189 | if m: |
|
1189 | if m: | |
1190 | return (int(m.group(1)), int(m.group(2)), int(m.group(3))) |
|
1190 | return (int(m.group(1)), int(m.group(2)), int(m.group(3))) | |
1191 |
|
1191 | |||
1192 | m = re.search(br'^git version (\d+)\.(\d+)', out) |
|
1192 | m = re.search(br'^git version (\d+)\.(\d+)', out) | |
1193 | if m: |
|
1193 | if m: | |
1194 | return (int(m.group(1)), int(m.group(2)), 0) |
|
1194 | return (int(m.group(1)), int(m.group(2)), 0) | |
1195 |
|
1195 | |||
1196 | return -1 |
|
1196 | return -1 | |
1197 |
|
1197 | |||
1198 | @staticmethod |
|
1198 | @staticmethod | |
1199 | def _checkversion(out): |
|
1199 | def _checkversion(out): | |
1200 | '''ensure git version is new enough |
|
1200 | '''ensure git version is new enough | |
1201 |
|
1201 | |||
1202 | >>> _checkversion = gitsubrepo._checkversion |
|
1202 | >>> _checkversion = gitsubrepo._checkversion | |
1203 | >>> _checkversion(b'git version 1.6.0') |
|
1203 | >>> _checkversion(b'git version 1.6.0') | |
1204 | 'ok' |
|
1204 | 'ok' | |
1205 | >>> _checkversion(b'git version 1.8.5') |
|
1205 | >>> _checkversion(b'git version 1.8.5') | |
1206 | 'ok' |
|
1206 | 'ok' | |
1207 | >>> _checkversion(b'git version 1.4.0') |
|
1207 | >>> _checkversion(b'git version 1.4.0') | |
1208 | 'abort' |
|
1208 | 'abort' | |
1209 | >>> _checkversion(b'git version 1.5.0') |
|
1209 | >>> _checkversion(b'git version 1.5.0') | |
1210 | 'warning' |
|
1210 | 'warning' | |
1211 | >>> _checkversion(b'git version 1.9-rc0') |
|
1211 | >>> _checkversion(b'git version 1.9-rc0') | |
1212 | 'ok' |
|
1212 | 'ok' | |
1213 | >>> _checkversion(b'git version 1.9.0.265.g81cdec2') |
|
1213 | >>> _checkversion(b'git version 1.9.0.265.g81cdec2') | |
1214 | 'ok' |
|
1214 | 'ok' | |
1215 | >>> _checkversion(b'git version 1.9.0.GIT') |
|
1215 | >>> _checkversion(b'git version 1.9.0.GIT') | |
1216 | 'ok' |
|
1216 | 'ok' | |
1217 | >>> _checkversion(b'git version 12345') |
|
1217 | >>> _checkversion(b'git version 12345') | |
1218 | 'unknown' |
|
1218 | 'unknown' | |
1219 | >>> _checkversion(b'no') |
|
1219 | >>> _checkversion(b'no') | |
1220 | 'unknown' |
|
1220 | 'unknown' | |
1221 | ''' |
|
1221 | ''' | |
1222 | version = gitsubrepo._gitversion(out) |
|
1222 | version = gitsubrepo._gitversion(out) | |
1223 | # git 1.4.0 can't work at all, but 1.5.X can in at least some cases, |
|
1223 | # git 1.4.0 can't work at all, but 1.5.X can in at least some cases, | |
1224 | # despite the docstring comment. For now, error on 1.4.0, warn on |
|
1224 | # despite the docstring comment. For now, error on 1.4.0, warn on | |
1225 | # 1.5.0 but attempt to continue. |
|
1225 | # 1.5.0 but attempt to continue. | |
1226 | if version == -1: |
|
1226 | if version == -1: | |
1227 | return 'unknown' |
|
1227 | return 'unknown' | |
1228 | if version < (1, 5, 0): |
|
1228 | if version < (1, 5, 0): | |
1229 | return 'abort' |
|
1229 | return 'abort' | |
1230 | elif version < (1, 6, 0): |
|
1230 | elif version < (1, 6, 0): | |
1231 | return 'warning' |
|
1231 | return 'warning' | |
1232 | return 'ok' |
|
1232 | return 'ok' | |
1233 |
|
1233 | |||
1234 | def _gitcommand(self, commands, env=None, stream=False): |
|
1234 | def _gitcommand(self, commands, env=None, stream=False): | |
1235 | return self._gitdir(commands, env=env, stream=stream)[0] |
|
1235 | return self._gitdir(commands, env=env, stream=stream)[0] | |
1236 |
|
1236 | |||
1237 | def _gitdir(self, commands, env=None, stream=False): |
|
1237 | def _gitdir(self, commands, env=None, stream=False): | |
1238 | return self._gitnodir(commands, env=env, stream=stream, |
|
1238 | return self._gitnodir(commands, env=env, stream=stream, | |
1239 | cwd=self._abspath) |
|
1239 | cwd=self._abspath) | |
1240 |
|
1240 | |||
1241 | def _gitnodir(self, commands, env=None, stream=False, cwd=None): |
|
1241 | def _gitnodir(self, commands, env=None, stream=False, cwd=None): | |
1242 | """Calls the git command |
|
1242 | """Calls the git command | |
1243 |
|
1243 | |||
1244 | This method tries to call the git command. Versions prior to 1.6.0 |
|
1244 | This method tries to call the git command. Versions prior to 1.6.0 | |
1245 | are not supported and will very probably fail. |
|
1245 | are not supported and will very probably fail. | |
1246 | """ |
|
1246 | """ | |
1247 | self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands))) |
|
1247 | self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands))) | |
1248 | if env is None: |
|
1248 | if env is None: | |
1249 | env = encoding.environ.copy() |
|
1249 | env = encoding.environ.copy() | |
1250 | # disable localization for Git output (issue5176) |
|
1250 | # disable localization for Git output (issue5176) | |
1251 | env['LC_ALL'] = 'C' |
|
1251 | env['LC_ALL'] = 'C' | |
1252 | # fix for Git CVE-2015-7545 |
|
1252 | # fix for Git CVE-2015-7545 | |
1253 | if 'GIT_ALLOW_PROTOCOL' not in env: |
|
1253 | if 'GIT_ALLOW_PROTOCOL' not in env: | |
1254 | env['GIT_ALLOW_PROTOCOL'] = 'file:git:http:https:ssh' |
|
1254 | env['GIT_ALLOW_PROTOCOL'] = 'file:git:http:https:ssh' | |
1255 | # unless ui.quiet is set, print git's stderr, |
|
1255 | # unless ui.quiet is set, print git's stderr, | |
1256 | # which is mostly progress and useful info |
|
1256 | # which is mostly progress and useful info | |
1257 | errpipe = None |
|
1257 | errpipe = None | |
1258 | if self.ui.quiet: |
|
1258 | if self.ui.quiet: | |
1259 | errpipe = open(os.devnull, 'w') |
|
1259 | errpipe = open(os.devnull, 'w') | |
1260 | if self.ui._colormode and len(commands) and commands[0] == "diff": |
|
1260 | if self.ui._colormode and len(commands) and commands[0] == "diff": | |
1261 | # insert the argument in the front, |
|
1261 | # insert the argument in the front, | |
1262 | # the end of git diff arguments is used for paths |
|
1262 | # the end of git diff arguments is used for paths | |
1263 | commands.insert(1, '--color') |
|
1263 | commands.insert(1, '--color') | |
1264 | p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1, |
|
1264 | p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1, | |
1265 | cwd=cwd, env=env, close_fds=procutil.closefds, |
|
1265 | cwd=cwd, env=env, close_fds=procutil.closefds, | |
1266 | stdout=subprocess.PIPE, stderr=errpipe) |
|
1266 | stdout=subprocess.PIPE, stderr=errpipe) | |
1267 | if stream: |
|
1267 | if stream: | |
1268 | return p.stdout, None |
|
1268 | return p.stdout, None | |
1269 |
|
1269 | |||
1270 | retdata = p.stdout.read().strip() |
|
1270 | retdata = p.stdout.read().strip() | |
1271 | # wait for the child to exit to avoid race condition. |
|
1271 | # wait for the child to exit to avoid race condition. | |
1272 | p.wait() |
|
1272 | p.wait() | |
1273 |
|
1273 | |||
1274 | if p.returncode != 0 and p.returncode != 1: |
|
1274 | if p.returncode != 0 and p.returncode != 1: | |
1275 | # there are certain error codes that are ok |
|
1275 | # there are certain error codes that are ok | |
1276 | command = commands[0] |
|
1276 | command = commands[0] | |
1277 | if command in ('cat-file', 'symbolic-ref'): |
|
1277 | if command in ('cat-file', 'symbolic-ref'): | |
1278 | return retdata, p.returncode |
|
1278 | return retdata, p.returncode | |
1279 | # for all others, abort |
|
1279 | # for all others, abort | |
1280 | raise error.Abort(_('git %s error %d in %s') % |
|
1280 | raise error.Abort(_('git %s error %d in %s') % | |
1281 | (command, p.returncode, self._relpath)) |
|
1281 | (command, p.returncode, self._relpath)) | |
1282 |
|
1282 | |||
1283 | return retdata, p.returncode |
|
1283 | return retdata, p.returncode | |
1284 |
|
1284 | |||
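(Editor's aside, not part of the changeset: _gitnodir() above pins LC_ALL=C so git output stays parseable and sets GIT_ALLOW_PROTOCOL as the CVE-2015-7545 mitigation. Below is a minimal standalone sketch of that invocation pattern; run_git is an illustrative name, not a Mercurial API.)

# Illustrative sketch, not Mercurial code: run git with unlocalized
# output and a restricted protocol whitelist, returning (stdout, rc).
import os
import subprocess

def run_git(args, cwd=None):
    env = os.environ.copy()
    env['LC_ALL'] = 'C'
    env.setdefault('GIT_ALLOW_PROTOCOL', 'file:git:http:https:ssh')
    p = subprocess.Popen(['git'] + list(args), cwd=cwd, env=env,
                         stdout=subprocess.PIPE)
    out, _ = p.communicate()
    return out.strip(), p.returncode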
1285 | def _gitmissing(self): |
|
1285 | def _gitmissing(self): | |
1286 | return not self.wvfs.exists('.git') |
|
1286 | return not self.wvfs.exists('.git') | |
1287 |
|
1287 | |||
1288 | def _gitstate(self): |
|
1288 | def _gitstate(self): | |
1289 | return self._gitcommand(['rev-parse', 'HEAD']) |
|
1289 | return self._gitcommand(['rev-parse', 'HEAD']) | |
1290 |
|
1290 | |||
1291 | def _gitcurrentbranch(self): |
|
1291 | def _gitcurrentbranch(self): | |
1292 | current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet']) |
|
1292 | current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet']) | |
1293 | if err: |
|
1293 | if err: | |
1294 | current = None |
|
1294 | current = None | |
1295 | return current |
|
1295 | return current | |
1296 |
|
1296 | |||
1297 | def _gitremote(self, remote): |
|
1297 | def _gitremote(self, remote): | |
1298 | out = self._gitcommand(['remote', 'show', '-n', remote]) |
|
1298 | out = self._gitcommand(['remote', 'show', '-n', remote]) | |
1299 | line = out.split('\n')[1] |
|
1299 | line = out.split('\n')[1] | |
1300 | i = line.index('URL: ') + len('URL: ') |
|
1300 | i = line.index('URL: ') + len('URL: ') | |
1301 | return line[i:] |
|
1301 | return line[i:] | |
1302 |
|
1302 | |||
1303 | def _githavelocally(self, revision): |
|
1303 | def _githavelocally(self, revision): | |
1304 | out, code = self._gitdir(['cat-file', '-e', revision]) |
|
1304 | out, code = self._gitdir(['cat-file', '-e', revision]) | |
1305 | return code == 0 |
|
1305 | return code == 0 | |
1306 |
|
1306 | |||
1307 | def _gitisancestor(self, r1, r2): |
|
1307 | def _gitisancestor(self, r1, r2): | |
1308 | base = self._gitcommand(['merge-base', r1, r2]) |
|
1308 | base = self._gitcommand(['merge-base', r1, r2]) | |
1309 | return base == r1 |
|
1309 | return base == r1 | |
1310 |
|
1310 | |||
1311 | def _gitisbare(self): |
|
1311 | def _gitisbare(self): | |
1312 | return self._gitcommand(['config', '--bool', 'core.bare']) == 'true' |
|
1312 | return self._gitcommand(['config', '--bool', 'core.bare']) == 'true' | |
1313 |
|
1313 | |||
1314 | def _gitupdatestat(self): |
|
1314 | def _gitupdatestat(self): | |
1315 | """This must be run before git diff-index. |
|
1315 | """This must be run before git diff-index. | |
1316 | diff-index only looks at changes to file stat; |
|
1316 | diff-index only looks at changes to file stat; | |
1317 | this command looks at file contents and updates the stat.""" |
|
1317 | this command looks at file contents and updates the stat.""" | |
1318 | self._gitcommand(['update-index', '-q', '--refresh']) |
|
1318 | self._gitcommand(['update-index', '-q', '--refresh']) | |
1319 |
|
1319 | |||
1320 | def _gitbranchmap(self): |
|
1320 | def _gitbranchmap(self): | |
1321 | '''returns 2 things: |
|
1321 | '''returns 2 things: | |
1322 | a map from git branch to revision |
|
1322 | a map from git branch to revision | |
1323 | a map from revision to branches''' |
|
1323 | a map from revision to branches''' | |
1324 | branch2rev = {} |
|
1324 | branch2rev = {} | |
1325 | rev2branch = {} |
|
1325 | rev2branch = {} | |
1326 |
|
1326 | |||
1327 | out = self._gitcommand(['for-each-ref', '--format', |
|
1327 | out = self._gitcommand(['for-each-ref', '--format', | |
1328 | '%(objectname) %(refname)']) |
|
1328 | '%(objectname) %(refname)']) | |
1329 | for line in out.split('\n'): |
|
1329 | for line in out.split('\n'): | |
1330 | revision, ref = line.split(' ') |
|
1330 | revision, ref = line.split(' ') | |
1331 | if (not ref.startswith('refs/heads/') and |
|
1331 | if (not ref.startswith('refs/heads/') and | |
1332 | not ref.startswith('refs/remotes/')): |
|
1332 | not ref.startswith('refs/remotes/')): | |
1333 | continue |
|
1333 | continue | |
1334 | if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'): |
|
1334 | if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'): | |
1335 | continue # ignore remote/HEAD redirects |
|
1335 | continue # ignore remote/HEAD redirects | |
1336 | branch2rev[ref] = revision |
|
1336 | branch2rev[ref] = revision | |
1337 | rev2branch.setdefault(revision, []).append(ref) |
|
1337 | rev2branch.setdefault(revision, []).append(ref) | |
1338 | return branch2rev, rev2branch |
|
1338 | return branch2rev, rev2branch | |
1339 |
|
1339 | |||
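(Editor's aside, not part of the changeset: _gitbranchmap() above builds both maps from one `git for-each-ref` call. Below is a standalone sketch of the same parsing, assuming a git checkout in cwd; branch_maps is an illustrative name.)

# Illustrative sketch, not Mercurial code: one for-each-ref call,
# two dicts (branch -> revision, revision -> [branches]).
import subprocess

def branch_maps(cwd='.'):
    out = subprocess.check_output(
        ['git', 'for-each-ref', '--format', '%(objectname) %(refname)'],
        cwd=cwd).decode()
    branch2rev, rev2branch = {}, {}
    for line in out.splitlines():
        revision, ref = line.split(' ', 1)
        if not ref.startswith(('refs/heads/', 'refs/remotes/')):
            continue
        if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
            continue  # ignore remote HEAD symrefs
        branch2rev[ref] = revision
        rev2branch.setdefault(revision, []).append(ref)
    return branch2rev, rev2branch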
1340 | def _gittracking(self, branches): |
|
1340 | def _gittracking(self, branches): | |
1341 | 'return map of remote branch to local tracking branch' |
|
1341 | 'return map of remote branch to local tracking branch' | |
1342 | # assumes no more than one local tracking branch for each remote |
|
1342 | # assumes no more than one local tracking branch for each remote | |
1343 | tracking = {} |
|
1343 | tracking = {} | |
1344 | for b in branches: |
|
1344 | for b in branches: | |
1345 | if b.startswith('refs/remotes/'): |
|
1345 | if b.startswith('refs/remotes/'): | |
1346 | continue |
|
1346 | continue | |
1347 | bname = b.split('/', 2)[2] |
|
1347 | bname = b.split('/', 2)[2] | |
1348 | remote = self._gitcommand(['config', 'branch.%s.remote' % bname]) |
|
1348 | remote = self._gitcommand(['config', 'branch.%s.remote' % bname]) | |
1349 | if remote: |
|
1349 | if remote: | |
1350 | ref = self._gitcommand(['config', 'branch.%s.merge' % bname]) |
|
1350 | ref = self._gitcommand(['config', 'branch.%s.merge' % bname]) | |
1351 | tracking['refs/remotes/%s/%s' % |
|
1351 | tracking['refs/remotes/%s/%s' % | |
1352 | (remote, ref.split('/', 2)[2])] = b |
|
1352 | (remote, ref.split('/', 2)[2])] = b | |
1353 | return tracking |
|
1353 | return tracking | |
1354 |
|
1354 | |||
1355 | def _abssource(self, source): |
|
1355 | def _abssource(self, source): | |
1356 | if '://' not in source: |
|
1356 | if '://' not in source: | |
1357 | # recognize the scp syntax as an absolute source |
|
1357 | # recognize the scp syntax as an absolute source | |
1358 | colon = source.find(':') |
|
1358 | colon = source.find(':') | |
1359 | if colon != -1 and '/' not in source[:colon]: |
|
1359 | if colon != -1 and '/' not in source[:colon]: | |
1360 | return source |
|
1360 | return source | |
1361 | self._subsource = source |
|
1361 | self._subsource = source | |
1362 | return _abssource(self) |
|
1362 | return _abssource(self) | |
1363 |
|
1363 | |||
1364 | def _fetch(self, source, revision): |
|
1364 | def _fetch(self, source, revision): | |
1365 | if self._gitmissing(): |
|
1365 | if self._gitmissing(): | |
1366 | # SEC: check for safe ssh url |
|
1366 | # SEC: check for safe ssh url | |
1367 | util.checksafessh(source) |
|
1367 | util.checksafessh(source) | |
1368 |
|
1368 | |||
1369 | source = self._abssource(source) |
|
1369 | source = self._abssource(source) | |
1370 | self.ui.status(_('cloning subrepo %s from %s\n') % |
|
1370 | self.ui.status(_('cloning subrepo %s from %s\n') % | |
1371 | (self._relpath, source)) |
|
1371 | (self._relpath, source)) | |
1372 | self._gitnodir(['clone', source, self._abspath]) |
|
1372 | self._gitnodir(['clone', source, self._abspath]) | |
1373 | if self._githavelocally(revision): |
|
1373 | if self._githavelocally(revision): | |
1374 | return |
|
1374 | return | |
1375 | self.ui.status(_('pulling subrepo %s from %s\n') % |
|
1375 | self.ui.status(_('pulling subrepo %s from %s\n') % | |
1376 | (self._relpath, self._gitremote('origin'))) |
|
1376 | (self._relpath, self._gitremote('origin'))) | |
1377 | # try only origin: the originally cloned repo |
|
1377 | # try only origin: the originally cloned repo | |
1378 | self._gitcommand(['fetch']) |
|
1378 | self._gitcommand(['fetch']) | |
1379 | if not self._githavelocally(revision): |
|
1379 | if not self._githavelocally(revision): | |
1380 | raise error.Abort(_('revision %s does not exist in subrepository ' |
|
1380 | raise error.Abort(_('revision %s does not exist in subrepository ' | |
1381 | '"%s"\n') % (revision, self._relpath)) |
|
1381 | '"%s"\n') % (revision, self._relpath)) | |
1382 |
|
1382 | |||
1383 | @annotatesubrepoerror |
|
1383 | @annotatesubrepoerror | |
1384 | def dirty(self, ignoreupdate=False, missing=False): |
|
1384 | def dirty(self, ignoreupdate=False, missing=False): | |
1385 | if self._gitmissing(): |
|
1385 | if self._gitmissing(): | |
1386 | return self._state[1] != '' |
|
1386 | return self._state[1] != '' | |
1387 | if self._gitisbare(): |
|
1387 | if self._gitisbare(): | |
1388 | return True |
|
1388 | return True | |
1389 | if not ignoreupdate and self._state[1] != self._gitstate(): |
|
1389 | if not ignoreupdate and self._state[1] != self._gitstate(): | |
1390 | # different version checked out |
|
1390 | # different version checked out | |
1391 | return True |
|
1391 | return True | |
1392 | # check for staged changes or modified files; ignore untracked files |
|
1392 | # check for staged changes or modified files; ignore untracked files | |
1393 | self._gitupdatestat() |
|
1393 | self._gitupdatestat() | |
1394 | out, code = self._gitdir(['diff-index', '--quiet', 'HEAD']) |
|
1394 | out, code = self._gitdir(['diff-index', '--quiet', 'HEAD']) | |
1395 | return code == 1 |
|
1395 | return code == 1 | |
1396 |
|
1396 | |||
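(Editor's aside, not part of the changeset: dirty() above refreshes the index before `diff-index --quiet HEAD` and treats exit status 1 as "modified". Below is a standalone sketch of that check; worktree_dirty is an illustrative name.)

# Illustrative sketch, not Mercurial code: diff-index trusts stat
# info, so refresh it first; --quiet exits 1 when tracked files differ.
import subprocess

def worktree_dirty(cwd='.'):
    subprocess.call(['git', 'update-index', '-q', '--refresh'], cwd=cwd)
    rc = subprocess.call(['git', 'diff-index', '--quiet', 'HEAD'], cwd=cwd)
    return rc == 1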
1397 | def basestate(self): |
|
1397 | def basestate(self): | |
1398 | return self._gitstate() |
|
1398 | return self._gitstate() | |
1399 |
|
1399 | |||
1400 | @annotatesubrepoerror |
|
1400 | @annotatesubrepoerror | |
1401 | def get(self, state, overwrite=False): |
|
1401 | def get(self, state, overwrite=False): | |
1402 | source, revision, kind = state |
|
1402 | source, revision, kind = state | |
1403 | if not revision: |
|
1403 | if not revision: | |
1404 | self.remove() |
|
1404 | self.remove() | |
1405 | return |
|
1405 | return | |
1406 | self._fetch(source, revision) |
|
1406 | self._fetch(source, revision) | |
1407 | # if the repo was set to be bare, unbare it |
|
1407 | # if the repo was set to be bare, unbare it | |
1408 | if self._gitisbare(): |
|
1408 | if self._gitisbare(): | |
1409 | self._gitcommand(['config', 'core.bare', 'false']) |
|
1409 | self._gitcommand(['config', 'core.bare', 'false']) | |
1410 | if self._gitstate() == revision: |
|
1410 | if self._gitstate() == revision: | |
1411 | self._gitcommand(['reset', '--hard', 'HEAD']) |
|
1411 | self._gitcommand(['reset', '--hard', 'HEAD']) | |
1412 | return |
|
1412 | return | |
1413 | elif self._gitstate() == revision: |
|
1413 | elif self._gitstate() == revision: | |
1414 | if overwrite: |
|
1414 | if overwrite: | |
1415 | # first reset the index to unmark new files for commit, because |
|
1415 | # first reset the index to unmark new files for commit, because | |
1416 | # reset --hard will otherwise throw away files added for commit, |
|
1416 | # reset --hard will otherwise throw away files added for commit, | |
1417 | # not just unmark them. |
|
1417 | # not just unmark them. | |
1418 | self._gitcommand(['reset', 'HEAD']) |
|
1418 | self._gitcommand(['reset', 'HEAD']) | |
1419 | self._gitcommand(['reset', '--hard', 'HEAD']) |
|
1419 | self._gitcommand(['reset', '--hard', 'HEAD']) | |
1420 | return |
|
1420 | return | |
1421 | branch2rev, rev2branch = self._gitbranchmap() |
|
1421 | branch2rev, rev2branch = self._gitbranchmap() | |
1422 |
|
1422 | |||
1423 | def checkout(args): |
|
1423 | def checkout(args): | |
1424 | cmd = ['checkout'] |
|
1424 | cmd = ['checkout'] | |
1425 | if overwrite: |
|
1425 | if overwrite: | |
1426 | # first reset the index to unmark new files for commit, because |
|
1426 | # first reset the index to unmark new files for commit, because | |
1427 | # the -f option will otherwise throw away files added for |
|
1427 | # the -f option will otherwise throw away files added for | |
1428 | # commit, not just unmark them. |
|
1428 | # commit, not just unmark them. | |
1429 | self._gitcommand(['reset', 'HEAD']) |
|
1429 | self._gitcommand(['reset', 'HEAD']) | |
1430 | cmd.append('-f') |
|
1430 | cmd.append('-f') | |
1431 | self._gitcommand(cmd + args) |
|
1431 | self._gitcommand(cmd + args) | |
1432 | _sanitize(self.ui, self.wvfs, '.git') |
|
1432 | _sanitize(self.ui, self.wvfs, '.git') | |
1433 |
|
1433 | |||
1434 | def rawcheckout(): |
|
1434 | def rawcheckout(): | |
1435 | # no branch to checkout, check it out with no branch |
|
1435 | # no branch to checkout, check it out with no branch | |
1436 | self.ui.warn(_('checking out detached HEAD in ' |
|
1436 | self.ui.warn(_('checking out detached HEAD in ' | |
1437 | 'subrepository "%s"\n') % self._relpath) |
|
1437 | 'subrepository "%s"\n') % self._relpath) | |
1438 | self.ui.warn(_('check out a git branch if you intend ' |
|
1438 | self.ui.warn(_('check out a git branch if you intend ' | |
1439 | 'to make changes\n')) |
|
1439 | 'to make changes\n')) | |
1440 | checkout(['-q', revision]) |
|
1440 | checkout(['-q', revision]) | |
1441 |
|
1441 | |||
1442 | if revision not in rev2branch: |
|
1442 | if revision not in rev2branch: | |
1443 | rawcheckout() |
|
1443 | rawcheckout() | |
1444 | return |
|
1444 | return | |
1445 | branches = rev2branch[revision] |
|
1445 | branches = rev2branch[revision] | |
1446 | firstlocalbranch = None |
|
1446 | firstlocalbranch = None | |
1447 | for b in branches: |
|
1447 | for b in branches: | |
1448 | if b == 'refs/heads/master': |
|
1448 | if b == 'refs/heads/master': | |
1449 | # master trumps all other branches |
|
1449 | # master trumps all other branches | |
1450 | checkout(['refs/heads/master']) |
|
1450 | checkout(['refs/heads/master']) | |
1451 | return |
|
1451 | return | |
1452 | if not firstlocalbranch and not b.startswith('refs/remotes/'): |
|
1452 | if not firstlocalbranch and not b.startswith('refs/remotes/'): | |
1453 | firstlocalbranch = b |
|
1453 | firstlocalbranch = b | |
1454 | if firstlocalbranch: |
|
1454 | if firstlocalbranch: | |
1455 | checkout([firstlocalbranch]) |
|
1455 | checkout([firstlocalbranch]) | |
1456 | return |
|
1456 | return | |
1457 |
|
1457 | |||
1458 | tracking = self._gittracking(branch2rev.keys()) |
|
1458 | tracking = self._gittracking(branch2rev.keys()) | |
1459 | # choose a remote branch already tracked if possible |
|
1459 | # choose a remote branch already tracked if possible | |
1460 | remote = branches[0] |
|
1460 | remote = branches[0] | |
1461 | if remote not in tracking: |
|
1461 | if remote not in tracking: | |
1462 | for b in branches: |
|
1462 | for b in branches: | |
1463 | if b in tracking: |
|
1463 | if b in tracking: | |
1464 | remote = b |
|
1464 | remote = b | |
1465 | break |
|
1465 | break | |
1466 |
|
1466 | |||
1467 | if remote not in tracking: |
|
1467 | if remote not in tracking: | |
1468 | # create a new local tracking branch |
|
1468 | # create a new local tracking branch | |
1469 | local = remote.split('/', 3)[3] |
|
1469 | local = remote.split('/', 3)[3] | |
1470 | checkout(['-b', local, remote]) |
|
1470 | checkout(['-b', local, remote]) | |
1471 | elif self._gitisancestor(branch2rev[tracking[remote]], remote): |
|
1471 | elif self._gitisancestor(branch2rev[tracking[remote]], remote): | |
1472 | # When updating to a tracked remote branch, |
|
1472 | # When updating to a tracked remote branch, | |
1473 | # if the local tracking branch is downstream of it, |
|
1473 | # if the local tracking branch is downstream of it, | |
1474 | # a normal `git pull` would have performed a "fast-forward merge" |
|
1474 | # a normal `git pull` would have performed a "fast-forward merge" | |
1475 | # which is equivalent to updating the local branch to the remote. |
|
1475 | # which is equivalent to updating the local branch to the remote. | |
1476 | # Since we are only looking at branching at update, we need to |
|
1476 | # Since we are only looking at branching at update, we need to | |
1477 | # detect this situation and perform this action lazily. |
|
1477 | # detect this situation and perform this action lazily. | |
1478 | if tracking[remote] != self._gitcurrentbranch(): |
|
1478 | if tracking[remote] != self._gitcurrentbranch(): | |
1479 | checkout([tracking[remote]]) |
|
1479 | checkout([tracking[remote]]) | |
1480 | self._gitcommand(['merge', '--ff', remote]) |
|
1480 | self._gitcommand(['merge', '--ff', remote]) | |
1481 | _sanitize(self.ui, self.wvfs, '.git') |
|
1481 | _sanitize(self.ui, self.wvfs, '.git') | |
1482 | else: |
|
1482 | else: | |
1483 | # a real merge would be required, just checkout the revision |
|
1483 | # a real merge would be required, just checkout the revision | |
1484 | rawcheckout() |
|
1484 | rawcheckout() | |
1485 |
|
1485 | |||
1486 | @annotatesubrepoerror |
|
1486 | @annotatesubrepoerror | |
1487 | def commit(self, text, user, date): |
|
1487 | def commit(self, text, user, date): | |
1488 | if self._gitmissing(): |
|
1488 | if self._gitmissing(): | |
1489 | raise error.Abort(_("subrepo %s is missing") % self._relpath) |
|
1489 | raise error.Abort(_("subrepo %s is missing") % self._relpath) | |
1490 | cmd = ['commit', '-a', '-m', text] |
|
1490 | cmd = ['commit', '-a', '-m', text] | |
1491 | env = encoding.environ.copy() |
|
1491 | env = encoding.environ.copy() | |
1492 | if user: |
|
1492 | if user: | |
1493 | cmd += ['--author', user] |
|
1493 | cmd += ['--author', user] | |
1494 | if date: |
|
1494 | if date: | |
1495 | # git's date parser silently ignores when seconds < 1e9 |
|
1495 | # git's date parser silently ignores when seconds < 1e9 | |
1496 | # convert to ISO8601 |
|
1496 | # convert to ISO8601 | |
1497 | env['GIT_AUTHOR_DATE'] = dateutil.datestr(date, |
|
1497 | env['GIT_AUTHOR_DATE'] = dateutil.datestr(date, | |
1498 | '%Y-%m-%dT%H:%M:%S %1%2') |
|
1498 | '%Y-%m-%dT%H:%M:%S %1%2') | |
1499 | self._gitcommand(cmd, env=env) |
|
1499 | self._gitcommand(cmd, env=env) | |
1500 | # make sure commit works otherwise HEAD might not exist under certain |
|
1500 | # make sure commit works otherwise HEAD might not exist under certain | |
1501 | # circumstances |
|
1501 | # circumstances | |
1502 | return self._gitstate() |
|
1502 | return self._gitstate() | |
1503 |
|
1503 | |||
1504 | @annotatesubrepoerror |
|
1504 | @annotatesubrepoerror | |
1505 | def merge(self, state): |
|
1505 | def merge(self, state): | |
1506 | source, revision, kind = state |
|
1506 | source, revision, kind = state | |
1507 | self._fetch(source, revision) |
|
1507 | self._fetch(source, revision) | |
1508 | base = self._gitcommand(['merge-base', revision, self._state[1]]) |
|
1508 | base = self._gitcommand(['merge-base', revision, self._state[1]]) | |
1509 | self._gitupdatestat() |
|
1509 | self._gitupdatestat() | |
1510 | out, code = self._gitdir(['diff-index', '--quiet', 'HEAD']) |
|
1510 | out, code = self._gitdir(['diff-index', '--quiet', 'HEAD']) | |
1511 |
|
1511 | |||
1512 | def mergefunc(): |
|
1512 | def mergefunc(): | |
1513 | if base == revision: |
|
1513 | if base == revision: | |
1514 | self.get(state) # fast forward merge |
|
1514 | self.get(state) # fast forward merge | |
1515 | elif base != self._state[1]: |
|
1515 | elif base != self._state[1]: | |
1516 | self._gitcommand(['merge', '--no-commit', revision]) |
|
1516 | self._gitcommand(['merge', '--no-commit', revision]) | |
1517 | _sanitize(self.ui, self.wvfs, '.git') |
|
1517 | _sanitize(self.ui, self.wvfs, '.git') | |
1518 |
|
1518 | |||
1519 | if self.dirty(): |
|
1519 | if self.dirty(): | |
1520 | if self._gitstate() != revision: |
|
1520 | if self._gitstate() != revision: | |
1521 | dirty = self._gitstate() == self._state[1] or code != 0 |
|
1521 | dirty = self._gitstate() == self._state[1] or code != 0 | |
1522 | if _updateprompt(self.ui, self, dirty, |
|
1522 | if _updateprompt(self.ui, self, dirty, | |
1523 | self._state[1][:7], revision[:7]): |
|
1523 | self._state[1][:7], revision[:7]): | |
1524 | mergefunc() |
|
1524 | mergefunc() | |
1525 | else: |
|
1525 | else: | |
1526 | mergefunc() |
|
1526 | mergefunc() | |
1527 |
|
1527 | |||
1528 | @annotatesubrepoerror |
|
1528 | @annotatesubrepoerror | |
1529 | def push(self, opts): |
|
1529 | def push(self, opts): | |
1530 | force = opts.get('force') |
|
1530 | force = opts.get('force') | |
1531 |
|
1531 | |||
1532 | if not self._state[1]: |
|
1532 | if not self._state[1]: | |
1533 | return True |
|
1533 | return True | |
1534 | if self._gitmissing(): |
|
1534 | if self._gitmissing(): | |
1535 | raise error.Abort(_("subrepo %s is missing") % self._relpath) |
|
1535 | raise error.Abort(_("subrepo %s is missing") % self._relpath) | |
1536 | # if a branch in origin contains the revision, nothing to do |
|
1536 | # if a branch in origin contains the revision, nothing to do | |
1537 | branch2rev, rev2branch = self._gitbranchmap() |
|
1537 | branch2rev, rev2branch = self._gitbranchmap() | |
1538 | if self._state[1] in rev2branch: |
|
1538 | if self._state[1] in rev2branch: | |
1539 | for b in rev2branch[self._state[1]]: |
|
1539 | for b in rev2branch[self._state[1]]: | |
1540 | if b.startswith('refs/remotes/origin/'): |
|
1540 | if b.startswith('refs/remotes/origin/'): | |
1541 | return True |
|
1541 | return True | |
1542 | for b, revision in branch2rev.iteritems(): |
|
1542 | for b, revision in branch2rev.iteritems(): | |
1543 | if b.startswith('refs/remotes/origin/'): |
|
1543 | if b.startswith('refs/remotes/origin/'): | |
1544 | if self._gitisancestor(self._state[1], revision): |
|
1544 | if self._gitisancestor(self._state[1], revision): | |
1545 | return True |
|
1545 | return True | |
1546 | # otherwise, try to push the currently checked out branch |
|
1546 | # otherwise, try to push the currently checked out branch | |
1547 | cmd = ['push'] |
|
1547 | cmd = ['push'] | |
1548 | if force: |
|
1548 | if force: | |
1549 | cmd.append('--force') |
|
1549 | cmd.append('--force') | |
1550 |
|
1550 | |||
1551 | current = self._gitcurrentbranch() |
|
1551 | current = self._gitcurrentbranch() | |
1552 | if current: |
|
1552 | if current: | |
1553 | # determine if the current branch is even useful |
|
1553 | # determine if the current branch is even useful | |
1554 | if not self._gitisancestor(self._state[1], current): |
|
1554 | if not self._gitisancestor(self._state[1], current): | |
1555 | self.ui.warn(_('unrelated git branch checked out ' |
|
1555 | self.ui.warn(_('unrelated git branch checked out ' | |
1556 | 'in subrepository "%s"\n') % self._relpath) |
|
1556 | 'in subrepository "%s"\n') % self._relpath) | |
1557 | return False |
|
1557 | return False | |
1558 | self.ui.status(_('pushing branch %s of subrepository "%s"\n') % |
|
1558 | self.ui.status(_('pushing branch %s of subrepository "%s"\n') % | |
1559 | (current.split('/', 2)[2], self._relpath)) |
|
1559 | (current.split('/', 2)[2], self._relpath)) | |
1560 | ret = self._gitdir(cmd + ['origin', current]) |
|
1560 | ret = self._gitdir(cmd + ['origin', current]) | |
1561 | return ret[1] == 0 |
|
1561 | return ret[1] == 0 | |
1562 | else: |
|
1562 | else: | |
1563 | self.ui.warn(_('no branch checked out in subrepository "%s"\n' |
|
1563 | self.ui.warn(_('no branch checked out in subrepository "%s"\n' | |
1564 | 'cannot push revision %s\n') % |
|
1564 | 'cannot push revision %s\n') % | |
1565 | (self._relpath, self._state[1])) |
|
1565 | (self._relpath, self._state[1])) | |
1566 | return False |
|
1566 | return False | |
1567 |
|
1567 | |||
1568 | @annotatesubrepoerror |
|
1568 | @annotatesubrepoerror | |
1569 | def add(self, ui, match, prefix, explicitonly, **opts): |
|
1569 | def add(self, ui, match, prefix, explicitonly, **opts): | |
1570 | if self._gitmissing(): |
|
1570 | if self._gitmissing(): | |
1571 | return [] |
|
1571 | return [] | |
1572 |
|
1572 | |||
1573 | (modified, added, removed, |
|
1573 | (modified, added, removed, | |
1574 | deleted, unknown, ignored, clean) = self.status(None, unknown=True, |
|
1574 | deleted, unknown, ignored, clean) = self.status(None, unknown=True, | |
1575 | clean=True) |
|
1575 | clean=True) | |
1576 |
|
1576 | |||
1577 | tracked = set() |
|
1577 | tracked = set() | |
1578 | # dirstates 'amn' warn, 'r' is added again |
|
1578 | # dirstates 'amn' warn, 'r' is added again | |
1579 | for l in (modified, added, deleted, clean): |
|
1579 | for l in (modified, added, deleted, clean): | |
1580 | tracked.update(l) |
|
1580 | tracked.update(l) | |
1581 |
|
1581 | |||
1582 | # Unknown files not of interest will be rejected by the matcher |
|
1582 | # Unknown files not of interest will be rejected by the matcher | |
1583 | files = unknown |
|
1583 | files = unknown | |
1584 | files.extend(match.files()) |
|
1584 | files.extend(match.files()) | |
1585 |
|
1585 | |||
1586 | rejected = [] |
|
1586 | rejected = [] | |
1587 |
|
1587 | |||
1588 | files = [f for f in sorted(set(files)) if match(f)] |
|
1588 | files = [f for f in sorted(set(files)) if match(f)] | |
1589 | for f in files: |
|
1589 | for f in files: | |
1590 | exact = match.exact(f) |
|
1590 | exact = match.exact(f) | |
1591 | command = ["add"] |
|
1591 | command = ["add"] | |
1592 | if exact: |
|
1592 | if exact: | |
1593 | command.append("-f") #should be added, even if ignored |
|
1593 | command.append("-f") #should be added, even if ignored | |
1594 | if ui.verbose or not exact: |
|
1594 | if ui.verbose or not exact: | |
1595 | ui.status(_('adding %s\n') % match.rel(f)) |
|
1595 | ui.status(_('adding %s\n') % match.rel(f)) | |
1596 |
|
1596 | |||
1597 | if f in tracked: # hg prints 'adding' even if already tracked |
|
1597 | if f in tracked: # hg prints 'adding' even if already tracked | |
1598 | if exact: |
|
1598 | if exact: | |
1599 | rejected.append(f) |
|
1599 | rejected.append(f) | |
1600 | continue |
|
1600 | continue | |
1601 | if not opts.get(r'dry_run'): |
|
1601 | if not opts.get(r'dry_run'): | |
1602 | self._gitcommand(command + [f]) |
|
1602 | self._gitcommand(command + [f]) | |
1603 |
|
1603 | |||
1604 | for f in rejected: |
|
1604 | for f in rejected: | |
1605 | ui.warn(_("%s already tracked!\n") % match.abs(f)) |
|
1605 | ui.warn(_("%s already tracked!\n") % match.abs(f)) | |
1606 |
|
1606 | |||
1607 | return rejected |
|
1607 | return rejected | |
1608 |
|
1608 | |||
1609 | @annotatesubrepoerror |
|
1609 | @annotatesubrepoerror | |
1610 | def remove(self): |
|
1610 | def remove(self): | |
1611 | if self._gitmissing(): |
|
1611 | if self._gitmissing(): | |
1612 | return |
|
1612 | return | |
1613 | if self.dirty(): |
|
1613 | if self.dirty(): | |
1614 | self.ui.warn(_('not removing repo %s because ' |
|
1614 | self.ui.warn(_('not removing repo %s because ' | |
1615 | 'it has changes.\n') % self._relpath) |
|
1615 | 'it has changes.\n') % self._relpath) | |
1616 | return |
|
1616 | return | |
1617 | # we can't fully delete the repository as it may contain |
|
1617 | # we can't fully delete the repository as it may contain | |
1618 | # local-only history |
|
1618 | # local-only history | |
1619 | self.ui.note(_('removing subrepo %s\n') % self._relpath) |
|
1619 | self.ui.note(_('removing subrepo %s\n') % self._relpath) | |
1620 | self._gitcommand(['config', 'core.bare', 'true']) |
|
1620 | self._gitcommand(['config', 'core.bare', 'true']) | |
1621 | for f, kind in self.wvfs.readdir(): |
|
1621 | for f, kind in self.wvfs.readdir(): | |
1622 | if f == '.git': |
|
1622 | if f == '.git': | |
1623 | continue |
|
1623 | continue | |
1624 | if kind == stat.S_IFDIR: |
|
1624 | if kind == stat.S_IFDIR: | |
1625 | self.wvfs.rmtree(f) |
|
1625 | self.wvfs.rmtree(f) | |
1626 | else: |
|
1626 | else: | |
1627 | self.wvfs.unlink(f) |
|
1627 | self.wvfs.unlink(f) | |
1628 |
|
1628 | |||
1629 | def archive(self, archiver, prefix, match=None, decode=True): |
|
1629 | def archive(self, archiver, prefix, match=None, decode=True): | |
1630 | total = 0 |
|
1630 | total = 0 | |
1631 | source, revision = self._state |
|
1631 | source, revision = self._state | |
1632 | if not revision: |
|
1632 | if not revision: | |
1633 | return total |
|
1633 | return total | |
1634 | self._fetch(source, revision) |
|
1634 | self._fetch(source, revision) | |
1635 |
|
1635 | |||
1636 | # Parse the output of git's native archive command. |
|
1636 | # Parse the output of git's native archive command. | |
1637 | # This should be much faster than manually traversing the trees |
|
1637 | # This should be much faster than manually traversing the trees | |
1638 | # and objects with many subprocess calls. |
|
1638 | # and objects with many subprocess calls. | |
1639 | tarstream = self._gitcommand(['archive', revision], stream=True) |
|
1639 | tarstream = self._gitcommand(['archive', revision], stream=True) | |
1640 | tar = tarfile.open(fileobj=tarstream, mode='r|') |
|
1640 | tar = tarfile.open(fileobj=tarstream, mode=r'r|') | |
1641 | relpath = subrelpath(self) |
|
1641 | relpath = subrelpath(self) | |
1642 | self.ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files')) |
|
1642 | self.ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files')) | |
1643 | for i, info in enumerate(tar): |
|
1643 | for i, info in enumerate(tar): | |
1644 | if info.isdir(): |
|
1644 | if info.isdir(): | |
1645 | continue |
|
1645 | continue | |
1646 | if match and not match(info.name): |
|
1646 | if match and not match(info.name): | |
1647 | continue |
|
1647 | continue | |
1648 | if info.issym(): |
|
1648 | if info.issym(): | |
1649 | data = info.linkname |
|
1649 | data = info.linkname | |
1650 | else: |
|
1650 | else: | |
1651 | data = tar.extractfile(info).read() |
|
1651 | data = tar.extractfile(info).read() | |
1652 | archiver.addfile(prefix + self._path + '/' + info.name, |
|
1652 | archiver.addfile(prefix + self._path + '/' + info.name, | |
1653 | info.mode, info.issym(), data) |
|
1653 | info.mode, info.issym(), data) | |
1654 | total += 1 |
|
1654 | total += 1 | |
1655 | self.ui.progress(_('archiving (%s)') % relpath, i + 1, |
|
1655 | self.ui.progress(_('archiving (%s)') % relpath, i + 1, | |
1656 | unit=_('files')) |
|
1656 | unit=_('files')) | |
1657 | self.ui.progress(_('archiving (%s)') % relpath, None) |
|
1657 | self.ui.progress(_('archiving (%s)') % relpath, None) | |
1658 | return total |
|
1658 | return total | |
1659 |
|
1659 | |||
1660 |
|
1660 | |||
1661 | @annotatesubrepoerror |
|
1661 | @annotatesubrepoerror | |
1662 | def cat(self, match, fm, fntemplate, prefix, **opts): |
|
1662 | def cat(self, match, fm, fntemplate, prefix, **opts): | |
1663 | rev = self._state[1] |
|
1663 | rev = self._state[1] | |
1664 | if match.anypats(): |
|
1664 | if match.anypats(): | |
1665 | return 1 #No support for include/exclude yet |
|
1665 | return 1 #No support for include/exclude yet | |
1666 |
|
1666 | |||
1667 | if not match.files(): |
|
1667 | if not match.files(): | |
1668 | return 1 |
|
1668 | return 1 | |
1669 |
|
1669 | |||
1670 | # TODO: add support for non-plain formatter (see cmdutil.cat()) |
|
1670 | # TODO: add support for non-plain formatter (see cmdutil.cat()) | |
1671 | for f in match.files(): |
|
1671 | for f in match.files(): | |
1672 | output = self._gitcommand(["show", "%s:%s" % (rev, f)]) |
|
1672 | output = self._gitcommand(["show", "%s:%s" % (rev, f)]) | |
1673 | fp = cmdutil.makefileobj(self._ctx, fntemplate, |
|
1673 | fp = cmdutil.makefileobj(self._ctx, fntemplate, | |
1674 | pathname=self.wvfs.reljoin(prefix, f)) |
|
1674 | pathname=self.wvfs.reljoin(prefix, f)) | |
1675 | fp.write(output) |
|
1675 | fp.write(output) | |
1676 | fp.close() |
|
1676 | fp.close() | |
1677 | return 0 |
|
1677 | return 0 | |
1678 |
|
1678 | |||
1679 |
|
1679 | |||
1680 | @annotatesubrepoerror |
|
1680 | @annotatesubrepoerror | |
1681 | def status(self, rev2, **opts): |
|
1681 | def status(self, rev2, **opts): | |
1682 | rev1 = self._state[1] |
|
1682 | rev1 = self._state[1] | |
1683 | if self._gitmissing() or not rev1: |
|
1683 | if self._gitmissing() or not rev1: | |
1684 | # if the repo is missing, return no results |
|
1684 | # if the repo is missing, return no results | |
1685 | return scmutil.status([], [], [], [], [], [], []) |
|
1685 | return scmutil.status([], [], [], [], [], [], []) | |
1686 | modified, added, removed = [], [], [] |
|
1686 | modified, added, removed = [], [], [] | |
1687 | self._gitupdatestat() |
|
1687 | self._gitupdatestat() | |
1688 | if rev2: |
|
1688 | if rev2: | |
1689 | command = ['diff-tree', '--no-renames', '-r', rev1, rev2] |
|
1689 | command = ['diff-tree', '--no-renames', '-r', rev1, rev2] | |
1690 | else: |
|
1690 | else: | |
1691 | command = ['diff-index', '--no-renames', rev1] |
|
1691 | command = ['diff-index', '--no-renames', rev1] | |
1692 | out = self._gitcommand(command) |
|
1692 | out = self._gitcommand(command) | |
1693 | for line in out.split('\n'): |
|
1693 | for line in out.split('\n'): | |
1694 | tab = line.find('\t') |
|
1694 | tab = line.find('\t') | |
1695 | if tab == -1: |
|
1695 | if tab == -1: | |
1696 | continue |
|
1696 | continue | |
1697 | status, f = line[tab - 1], line[tab + 1:] |
|
1697 | status, f = line[tab - 1], line[tab + 1:] | |
1698 | if status == 'M': |
|
1698 | if status == 'M': | |
1699 | modified.append(f) |
|
1699 | modified.append(f) | |
1700 | elif status == 'A': |
|
1700 | elif status == 'A': | |
1701 | added.append(f) |
|
1701 | added.append(f) | |
1702 | elif status == 'D': |
|
1702 | elif status == 'D': | |
1703 | removed.append(f) |
|
1703 | removed.append(f) | |
1704 |
|
1704 | |||
1705 |         deleted, unknown, ignored, clean = [], [], [], []
1706 |
1707 |         command = ['status', '--porcelain', '-z']
1708 |         if opts.get(r'unknown'):
1709 |             command += ['--untracked-files=all']
1710 |         if opts.get(r'ignored'):
1711 |             command += ['--ignored']
1712 |         out = self._gitcommand(command)
1713 |
1714 |         changedfiles = set()
1715 |         changedfiles.update(modified)
1716 |         changedfiles.update(added)
1717 |         changedfiles.update(removed)
1718 |         for line in out.split('\0'):
1719 |             if not line:
1720 |                 continue
1721 |             st = line[0:2]
1722 |             #moves and copies show 2 files on one line
1723 |             if line.find('\0') >= 0:
1724 |                 filename1, filename2 = line[3:].split('\0')
1725 |             else:
1726 |                 filename1 = line[3:]
1727 |                 filename2 = None
1728 |
1729 |             changedfiles.add(filename1)
1730 |             if filename2:
1731 |                 changedfiles.add(filename2)
1732 |
1733 |             if st == '??':
1734 |                 unknown.append(filename1)
1735 |             elif st == '!!':
1736 |                 ignored.append(filename1)
1737 |
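The `--porcelain -z` output consumed above is a NUL-separated stream of entries of the form 'XY path', where '??' marks untracked and '!!' marks ignored files. A fabricated sample, parsed the same way as in the loop:

    out = '?? newfile.txt\0!! build/output.o\0 M tracked.py\0'
    for entry in out.split('\0'):
        if not entry:
            continue
        st, path = entry[0:2], entry[3:]
        # '??' -> unknown, '!!' -> ignored; other statuses were already
        # collected from the diff-index/diff-tree pass above
        print(st, path)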
1738 |         if opts.get(r'clean'):
1739 |             out = self._gitcommand(['ls-files'])
1740 |             for f in out.split('\n'):
1741 |                 if not f in changedfiles:
1742 |                     clean.append(f)
1743 |
1744 |         return scmutil.status(modified, added, removed, deleted,
1745 |                               unknown, ignored, clean)
1746 |
1747 |     @annotatesubrepoerror
1748 |     def diff(self, ui, diffopts, node2, match, prefix, **opts):
1749 |         node1 = self._state[1]
1750 |         cmd = ['diff', '--no-renames']
1751 |         if opts[r'stat']:
1752 |             cmd.append('--stat')
1753 |         else:
1754 |             # for Git, this also implies '-p'
1755 |             cmd.append('-U%d' % diffopts.context)
1756 |
1757 |         gitprefix = self.wvfs.reljoin(prefix, self._path)
1758 |
1759 |         if diffopts.noprefix:
1760 |             cmd.extend(['--src-prefix=%s/' % gitprefix,
1761 |                         '--dst-prefix=%s/' % gitprefix])
1762 |         else:
1763 |             cmd.extend(['--src-prefix=a/%s/' % gitprefix,
1764 |                         '--dst-prefix=b/%s/' % gitprefix])
1765 |
1766 |         if diffopts.ignorews:
1767 |             cmd.append('--ignore-all-space')
1768 |         if diffopts.ignorewsamount:
1769 |             cmd.append('--ignore-space-change')
1770 |         if self._gitversion(self._gitcommand(['--version'])) >= (1, 8, 4) \
1771 |                 and diffopts.ignoreblanklines:
1772 |             cmd.append('--ignore-blank-lines')
1773 |
1774 |         cmd.append(node1)
1775 |         if node2:
1776 |             cmd.append(node2)
1777 |
1778 |         output = ""
1779 |         if match.always():
1780 |             output += self._gitcommand(cmd) + '\n'
1781 |         else:
1782 |             st = self.status(node2)[:3]
1783 |             files = [f for sublist in st for f in sublist]
1784 |             for f in files:
1785 |                 if match(f):
1786 |                     output += self._gitcommand(cmd + ['--', f]) + '\n'
1787 |
1788 |         if output.strip():
1789 |             ui.write(output)
1790 |
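For orientation, this is roughly the argument list the method assembles for a git subrepo mounted at 'lib' with default options, i.e. the non '--stat', non 'noprefix' branch (the mount point and revision below are made up; the real values come from prefix, self._path and self._state):

    node1 = '9c2ed0aa5f8d0e14e2d6a6a9f64a9d8f0c1b2a3d'   # fabricated revision
    cmd = ['diff', '--no-renames', '-U3',
           '--src-prefix=a/lib/', '--dst-prefix=b/lib/', node1]
    # self._gitcommand(cmd) then runs `git diff ...` inside the subrepo, so
    # the emitted paths line up with the parent repository layout.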
1791 |     @annotatesubrepoerror
1792 |     def revert(self, substate, *pats, **opts):
1793 |         self.ui.status(_('reverting subrepo %s\n') % substate[0])
1794 |         if not opts.get(r'no_backup'):
1795 |             status = self.status(None)
1796 |             names = status.modified
1797 |             for name in names:
1798 |                 bakname = scmutil.origpath(self.ui, self._subparent, name)
1799 |                 self.ui.note(_('saving current version of %s as %s\n') %
1800 |                              (name, bakname))
1801 |                 self.wvfs.rename(name, bakname)
1802 |
1803 |         if not opts.get(r'dry_run'):
1804 |             self.get(substate, overwrite=True)
1805 |         return []
1806 |
1807 |     def shortid(self, revid):
1808 |         return revid[:7]
1809 |
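Before overwriting modified files, revert() stashes each one under a backup name produced by scmutil.origpath. A simplified sketch of the default naming only (an assumption here: the real helper can also relocate backups when ui.origbackuppath is configured):

    def backup_name(name):
        # default behaviour: keep the backup next to the file, '.orig' suffix
        return name + '.orig'

    # e.g. 'src/main.c' would be saved aside as 'src/main.c.orig'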
1810 | types = {
1811 |     'hg': hgsubrepo,
1812 |     'svn': svnsubrepo,
1813 |     'git': gitsubrepo,
1814 |     }
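This table maps the subrepo kind recorded in .hgsub (the optional '[kind]' prefix on the source, defaulting to 'hg') to its implementation class. A hedged sketch of the lookup, using a made-up URL; the actual prefix parsing lives elsewhere in the subrepo machinery:

    # hypothetical .hgsub entry:  lib = [git]https://example.com/lib.git
    source = '[git]https://example.com/lib.git'
    kind = source[1:source.index(']')] if source.startswith('[') else 'hg'
    subrepoclass = types[kind]   # -> gitsubrepo for this example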