Diff hunk: mercurial/hg.py — old lines 1-360 replaced by new lines 1-362 (two lines added).
1 | # hg.py - repository classes for mercurial |
|
1 | # hg.py - repository classes for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2, incorporated herein by reference. |
|
7 | # GNU General Public License version 2, incorporated herein by reference. | |
8 |
|
8 | |||
9 | from i18n import _ |
|
9 | from i18n import _ | |
10 | from lock import release |
|
10 | from lock import release | |
11 | import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo |
|
11 | import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo | |
12 | import lock, util, extensions, error |
|
12 | import lock, util, extensions, error | |
13 | import merge as _merge |
|
13 | import merge as _merge | |
14 | import verify as _verify |
|
14 | import verify as _verify | |
15 | import errno, os, shutil |
|
15 | import errno, os, shutil | |
16 |
|
16 | |||
def _local(path):
    """Pick the repository class for a plain local path.

    A path that points at an ordinary file is treated as a bundle
    (bundlerepo); anything else — typically a directory — is handled
    by localrepo.
    """
    stripped = util.drop_scheme('file', path)
    if os.path.isfile(stripped):
        return bundlerepo
    return localrepo
20 |
|
20 | |||
def parseurl(url, revs=None):
    '''parse url#branch, returning url, branch + revs

    Returns a 3-tuple (url, revs, checkout):
      - url with any trailing '#branch' fragment stripped
      - the revision list with the branch appended (or None when
        neither revisions nor a fragment were given)
      - the changeset to check out: the last requested revision,
        falling back to the fragment branch, else None
    '''
    # NOTE: the default used to be a shared mutable list (revs=[]);
    # normalize to a fresh list per call instead.
    revs = revs or []

    if '#' not in url:
        return url, (revs or None), revs and revs[-1] or None

    url, branch = url.split('#', 1)
    checkout = revs and revs[-1] or branch
    return url, revs + [branch], checkout
30 |
|
30 | |||
# Map of URL scheme -> repository backend.  Values are either a module
# exposing instance()/islocal(), or a callable taking the path and
# returning such a module (as _local does for 'file' paths, which may
# be either bundles or working repositories).
schemes = {
    'bundle': bundlerepo,
    'file': _local,
    'http': httprepo,
    'https': httprepo,
    'ssh': sshrepo,
    'static-http': statichttprepo,
}
39 |
|
39 | |||
def _lookup(path):
    """Resolve *path* to a repository backend via its URL scheme.

    Falls back to the 'file' handler when the path has no scheme or an
    unknown one.  A `schemes` entry may be a callable (like _local),
    which is invoked with the path, or a plain module, which is
    returned as-is.
    """
    scheme = 'file'
    if path:
        sep = path.find(':')
        if sep > 0:
            scheme = path[:sep]
    handler = schemes.get(scheme) or schemes['file']
    try:
        # callables choose a backend based on the concrete path
        return handler(path)
    except TypeError:
        # plain module entries are not callable
        return handler
51 |
|
51 | |||
def islocal(repo):
    '''return true if repo or path is local'''
    if not isinstance(repo, str):
        # already a repository object: ask it directly
        return repo.local()
    try:
        return _lookup(repo).islocal(repo)
    except AttributeError:
        # backend has no islocal() -> treat as remote
        return False
60 |
|
60 | |||
def repository(ui, path='', create=False):
    """Return a repository object for the specified path.

    The path's scheme selects the backend; after instantiation every
    loaded extension's reposetup hook is given a chance to wrap or
    augment the new repository.
    """
    repo = _lookup(path).instance(ui, path, create)
    # prefer the repository's own ui when it has one
    ui = getattr(repo, "ui", ui)
    for extname, module in extensions.extensions():
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, repo)
    return repo
70 |
|
70 | |||
def defaultdest(source):
    """Return the default clone destination for *source*.

    This is the final path component of the normalized source path,
    e.g. 'http://host/a/repo/' -> 'repo'.
    """
    normalized = os.path.normpath(source)
    return os.path.basename(normalized)
74 |
|
74 | |||
def localpath(path):
    """Strip a leading 'file:' scheme from *path*, if present.

    Handles the 'file://localhost/', 'file://' and bare 'file:' forms;
    any other path is returned unchanged.  For 'file://localhost/' the
    slash is kept so the result stays an absolute path.
    """
    for prefix, keep in (('file://localhost/', 16),
                         ('file://', 7),
                         ('file:', 5)):
        if path.startswith(prefix):
            return path[keep:]
    return path
83 |
|
83 | |||
def share(ui, source, dest=None, update=True):
    '''create a shared repository

    Creates *dest* as a lightweight repository whose store lives in
    *source*'s store: only a .hg directory with a 'sharedpath' pointer
    (and a 'shared' requirement) is written.  If *update* is true the
    working directory is checked out; a non-True value is used as the
    revision to update to.
    '''

    if not islocal(source):
        raise util.Abort(_('can only share local repositories'))

    if not dest:
        dest = os.path.basename(source)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, rev, checkout = parseurl(origsource, '')
        srcrepo = repository(ui, source)
    else:
        # already a repository object
        srcrepo = source
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    root = os.path.realpath(dest)
    roothg = os.path.join(root, '.hg')

    if os.path.exists(roothg):
        raise util.Abort(_('destination already exists'))

    if not os.path.isdir(root):
        os.mkdir(root)
    os.mkdir(roothg)

    # propagate the source's requirements, adding 'shared' so older
    # clients refuse to touch a repository they don't understand
    requirements = ''
    try:
        requirements = srcrepo.opener('requires').read()
    except IOError, inst:
        # a missing 'requires' file is fine; anything else is not
        if inst.errno != errno.ENOENT:
            raise

    requirements += 'shared\n'
    file(os.path.join(roothg, 'requires'), 'w').write(requirements)
    file(os.path.join(roothg, 'sharedpath'), 'w').write(sharedpath)

    # carry the source's default path over so pull/push keep working
    default = srcrepo.ui.config('paths', 'default')
    if default:
        f = file(os.path.join(roothg, 'hgrc'), 'w')
        f.write('[paths]\ndefault = %s\n' % default)
        f.close()

    r = repository(ui, root)

    if update:
        r.ui.status(_("updating working directory\n"))
        if update is not True:
            # a non-boolean 'update' names the revision to check out
            checkout = update
        # fall back from the requested revision to 'default', then 'tip'
        for test in (checkout, 'default', 'tip'):
            try:
                uprev = r.lookup(test)
                break
            except:
                # NOTE(review): bare except silently skips any lookup
                # failure and tries the next candidate
                continue
        _update(r, uprev)
144 |
|
144 | |||
def clone(ui, source, dest=None, pull=False, rev=None, update=True,
          stream=False):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository objects, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, rev, checkout = parseurl(origsource, rev)
        src_repo = repository(ui, source)
    else:
        src_repo = source
        origsource = source = src_repo.url()
        checkout = rev and rev[-1] or None

    if dest is None:
        dest = defaultdest(source)
        ui.status(_("destination directory: %s\n") % dest)

    dest = localpath(dest)
    source = localpath(source)

    if os.path.exists(dest):
        if not os.path.isdir(dest):
            raise util.Abort(_("destination '%s' already exists") % dest)
        elif os.listdir(dest):
            raise util.Abort(_("destination '%s' is not empty") % dest)

    # removes the partial destination on failure; close() disarms it
    class DirCleanup(object):
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
        def close(self):
            self.dir_ = None
        def cleanup(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    src_lock = dest_lock = dir_cleanup = None
    try:
        if islocal(dest):
            dir_cleanup = DirCleanup(dest)

        abspath = origsource
        copy = False
        if src_repo.cancopy() and islocal(dest):
            abspath = os.path.abspath(util.drop_scheme('file', origsource))
            # a raw file copy is only safe for a full, non-pull clone
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                src_lock = src_repo.lock(wait=False)
            except error.LockError:
                # source is busy: fall back to a regular pull-based clone
                copy = False

        if copy:
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                dir_cleanup.dir_ = hgdir
            try:
                dest_path = hgdir
                os.mkdir(dest_path)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dir_cleanup.close()
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            for f in src_repo.store.copylist():
                src = os.path.join(src_repo.path, f)
                dst = os.path.join(dest_path, f)
                dstbase = os.path.dirname(dst)
                if dstbase and not os.path.exists(dstbase):
                    os.mkdir(dstbase)
                if os.path.exists(src):
                    if dst.endswith('data'):
                        # lock to avoid premature writing to the target
                        dest_lock = lock.lock(os.path.join(dstbase, "lock"))
                    util.copyfiles(src, dst)

            # we need to re-init the repo after manually copying the data
            # into it
            dest_repo = repository(ui, dest)

        else:
            try:
                dest_repo = repository(ui, dest, create=True)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dir_cleanup.close()
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            revs = None
            if rev:
                if 'lookup' not in src_repo.capabilities:
                    raise util.Abort(_("src repository does not support revision "
                                       "lookup and so doesn't support clone by "
                                       "revision"))
                revs = [src_repo.lookup(r) for r in rev]
                checkout = revs[0]
            if dest_repo.local():
                dest_repo.clone(src_repo, heads=revs, stream=stream)
            elif src_repo.local():
                # remote destination: push the requested revisions to it
                src_repo.push(dest_repo, revs=revs)
            else:
                raise util.Abort(_("clone from remote to remote not supported"))

        # clone succeeded: disarm the failure cleanup
        if dir_cleanup:
            dir_cleanup.close()

        if dest_repo.local():
            # record the source as the default path for future pull/push
            fp = dest_repo.opener("hgrc", "w", text=True)
            fp.write("[paths]\n")
            fp.write("default = %s\n" % abspath)
            fp.close()

            # also set it on the live ui so this process sees it too
            dest_repo.ui.setconfig('paths', 'default', abspath)

            if update:
                dest_repo.ui.status(_("updating working directory\n"))
                if update is not True:
                    # a non-boolean 'update' names the revision to check out
                    checkout = update
                # fall back from the requested revision to 'default', 'tip'
                for test in (checkout, 'default', 'tip'):
                    try:
                        uprev = dest_repo.lookup(test)
                        break
                    except:
                        # NOTE(review): bare except skips any lookup failure
                        continue
                _update(dest_repo, uprev)

        return src_repo, dest_repo
    finally:
        release(src_lock, dest_lock)
        if dir_cleanup is not None:
            dir_cleanup.cleanup()
317 |
|
319 | |||
def _showstats(repo, stats):
    """Print a one-line summary of merge statistics.

    *stats* is the 4-tuple returned by merge.update():
    (updated, merged, removed, unresolved) file counts.
    """
    labels = (_("updated"), _("merged"), _("removed"), _("unresolved"))
    parts = [_("%d files %s") % (count, label)
             for count, label in zip(stats, labels)]
    repo.ui.status("%s\n" % ", ".join(parts))
325 |
|
327 | |||
def update(repo, node):
    """update the working directory to node, merging linear changes

    Returns True when unresolved file merges remain.
    """
    stats = _merge.update(repo, node, False, False, None)
    _showstats(repo, stats)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
333 |
|
335 | |||
# naming conflict in clone()
# clone() takes an 'update' keyword argument that shadows the module-level
# update() function, so internal callers use this private alias instead.
_update = update
336 |
|
338 | |||
def clean(repo, node, show_stats=True):
    """Forcibly switch the working directory to *node*, clobbering changes.

    Returns True when unresolved file merges remain after the update.
    """
    merge_stats = _merge.update(repo, node, False, True, None)
    if show_stats:
        _showstats(repo, merge_stats)
    return merge_stats[3] > 0
342 |
|
344 | |||
def merge(repo, node, force=None, remind=True):
    """Branch merge with *node*, resolving changes.

    Returns True when unresolved file merges remain; otherwise, when
    *remind* is set, prints a reminder to commit the merge.
    """
    merge_stats = _merge.update(repo, node, True, force, False)
    _showstats(repo, merge_stats)
    unresolved = merge_stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg up --clean' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved > 0
353 |
|
355 | |||
def revert(repo, node, choose):
    """Revert changes to revision in *node* without updating dirstate.

    *choose* is a file-selection callback passed through to
    merge.update().  Returns True when unresolved files remain.
    """
    stats = _merge.update(repo, node, False, True, choose)
    return stats[3] > 0
357 |
|
359 | |||
def verify(repo):
    """verify the consistency of a repository"""
    # thin wrapper so callers only need the hg module, not verify.py
    return _verify.verify(repo)
@@ -1,469 +1,479 b'' | |||||
1 | # merge.py - directory-level update/merge handling for Mercurial |
|
1 | # merge.py - directory-level update/merge handling for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2, incorporated herein by reference. |
|
6 | # GNU General Public License version 2, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from node import nullid, nullrev, hex, bin |
|
8 | from node import nullid, nullrev, hex, bin | |
9 | from i18n import _ |
|
9 | from i18n import _ | |
10 | import util, filemerge, copies |
|
10 | import util, filemerge, copies, subrepo | |
11 | import errno, os, shutil |
|
11 | import errno, os, shutil | |
12 |
|
12 | |||
13 | class mergestate(object): |
|
13 | class mergestate(object): | |
14 | '''track 3-way merge state of individual files''' |
|
14 | '''track 3-way merge state of individual files''' | |
15 | def __init__(self, repo): |
|
15 | def __init__(self, repo): | |
16 | self._repo = repo |
|
16 | self._repo = repo | |
17 | self._read() |
|
17 | self._read() | |
18 | def reset(self, node=None): |
|
18 | def reset(self, node=None): | |
19 | self._state = {} |
|
19 | self._state = {} | |
20 | if node: |
|
20 | if node: | |
21 | self._local = node |
|
21 | self._local = node | |
22 | shutil.rmtree(self._repo.join("merge"), True) |
|
22 | shutil.rmtree(self._repo.join("merge"), True) | |
23 | def _read(self): |
|
23 | def _read(self): | |
24 | self._state = {} |
|
24 | self._state = {} | |
25 | try: |
|
25 | try: | |
26 | localnode = None |
|
26 | localnode = None | |
27 | f = self._repo.opener("merge/state") |
|
27 | f = self._repo.opener("merge/state") | |
28 | for i, l in enumerate(f): |
|
28 | for i, l in enumerate(f): | |
29 | if i == 0: |
|
29 | if i == 0: | |
30 | localnode = l[:-1] |
|
30 | localnode = l[:-1] | |
31 | else: |
|
31 | else: | |
32 | bits = l[:-1].split("\0") |
|
32 | bits = l[:-1].split("\0") | |
33 | self._state[bits[0]] = bits[1:] |
|
33 | self._state[bits[0]] = bits[1:] | |
34 | self._local = bin(localnode) |
|
34 | self._local = bin(localnode) | |
35 | except IOError, err: |
|
35 | except IOError, err: | |
36 | if err.errno != errno.ENOENT: |
|
36 | if err.errno != errno.ENOENT: | |
37 | raise |
|
37 | raise | |
38 | def _write(self): |
|
38 | def _write(self): | |
39 | f = self._repo.opener("merge/state", "w") |
|
39 | f = self._repo.opener("merge/state", "w") | |
40 | f.write(hex(self._local) + "\n") |
|
40 | f.write(hex(self._local) + "\n") | |
41 | for d, v in self._state.iteritems(): |
|
41 | for d, v in self._state.iteritems(): | |
42 | f.write("\0".join([d] + v) + "\n") |
|
42 | f.write("\0".join([d] + v) + "\n") | |
43 | def add(self, fcl, fco, fca, fd, flags): |
|
43 | def add(self, fcl, fco, fca, fd, flags): | |
44 | hash = util.sha1(fcl.path()).hexdigest() |
|
44 | hash = util.sha1(fcl.path()).hexdigest() | |
45 | self._repo.opener("merge/" + hash, "w").write(fcl.data()) |
|
45 | self._repo.opener("merge/" + hash, "w").write(fcl.data()) | |
46 | self._state[fd] = ['u', hash, fcl.path(), fca.path(), |
|
46 | self._state[fd] = ['u', hash, fcl.path(), fca.path(), | |
47 | hex(fca.filenode()), fco.path(), flags] |
|
47 | hex(fca.filenode()), fco.path(), flags] | |
48 | self._write() |
|
48 | self._write() | |
49 | def __contains__(self, dfile): |
|
49 | def __contains__(self, dfile): | |
50 | return dfile in self._state |
|
50 | return dfile in self._state | |
51 | def __getitem__(self, dfile): |
|
51 | def __getitem__(self, dfile): | |
52 | return self._state[dfile][0] |
|
52 | return self._state[dfile][0] | |
53 | def __iter__(self): |
|
53 | def __iter__(self): | |
54 | l = self._state.keys() |
|
54 | l = self._state.keys() | |
55 | l.sort() |
|
55 | l.sort() | |
56 | for f in l: |
|
56 | for f in l: | |
57 | yield f |
|
57 | yield f | |
58 | def mark(self, dfile, state): |
|
58 | def mark(self, dfile, state): | |
59 | self._state[dfile][0] = state |
|
59 | self._state[dfile][0] = state | |
60 | self._write() |
|
60 | self._write() | |
61 | def resolve(self, dfile, wctx, octx): |
|
61 | def resolve(self, dfile, wctx, octx): | |
62 | if self[dfile] == 'r': |
|
62 | if self[dfile] == 'r': | |
63 | return 0 |
|
63 | return 0 | |
64 | state, hash, lfile, afile, anode, ofile, flags = self._state[dfile] |
|
64 | state, hash, lfile, afile, anode, ofile, flags = self._state[dfile] | |
65 | f = self._repo.opener("merge/" + hash) |
|
65 | f = self._repo.opener("merge/" + hash) | |
66 | self._repo.wwrite(dfile, f.read(), flags) |
|
66 | self._repo.wwrite(dfile, f.read(), flags) | |
67 | fcd = wctx[dfile] |
|
67 | fcd = wctx[dfile] | |
68 | fco = octx[ofile] |
|
68 | fco = octx[ofile] | |
69 | fca = self._repo.filectx(afile, fileid=anode) |
|
69 | fca = self._repo.filectx(afile, fileid=anode) | |
70 | r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca) |
|
70 | r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca) | |
71 | if not r: |
|
71 | if not r: | |
72 | self.mark(dfile, 'r') |
|
72 | self.mark(dfile, 'r') | |
73 | return r |
|
73 | return r | |
74 |
|
74 | |||
75 | def _checkunknown(wctx, mctx): |
|
75 | def _checkunknown(wctx, mctx): | |
76 | "check for collisions between unknown files and files in mctx" |
|
76 | "check for collisions between unknown files and files in mctx" | |
77 | for f in wctx.unknown(): |
|
77 | for f in wctx.unknown(): | |
78 | if f in mctx and mctx[f].cmp(wctx[f].data()): |
|
78 | if f in mctx and mctx[f].cmp(wctx[f].data()): | |
79 | raise util.Abort(_("untracked file in working directory differs" |
|
79 | raise util.Abort(_("untracked file in working directory differs" | |
80 | " from file in requested revision: '%s'") % f) |
|
80 | " from file in requested revision: '%s'") % f) | |
81 |
|
81 | |||
82 | def _checkcollision(mctx): |
|
82 | def _checkcollision(mctx): | |
83 | "check for case folding collisions in the destination context" |
|
83 | "check for case folding collisions in the destination context" | |
84 | folded = {} |
|
84 | folded = {} | |
85 | for fn in mctx: |
|
85 | for fn in mctx: | |
86 | fold = fn.lower() |
|
86 | fold = fn.lower() | |
87 | if fold in folded: |
|
87 | if fold in folded: | |
88 | raise util.Abort(_("case-folding collision between %s and %s") |
|
88 | raise util.Abort(_("case-folding collision between %s and %s") | |
89 | % (fn, folded[fold])) |
|
89 | % (fn, folded[fold])) | |
90 | folded[fold] = fn |
|
90 | folded[fold] = fn | |
91 |
|
91 | |||
92 | def _forgetremoved(wctx, mctx, branchmerge): |
|
92 | def _forgetremoved(wctx, mctx, branchmerge): | |
93 | """ |
|
93 | """ | |
94 | Forget removed files |
|
94 | Forget removed files | |
95 |
|
95 | |||
96 | If we're jumping between revisions (as opposed to merging), and if |
|
96 | If we're jumping between revisions (as opposed to merging), and if | |
97 | neither the working directory nor the target rev has the file, |
|
97 | neither the working directory nor the target rev has the file, | |
98 | then we need to remove it from the dirstate, to prevent the |
|
98 | then we need to remove it from the dirstate, to prevent the | |
99 | dirstate from listing the file when it is no longer in the |
|
99 | dirstate from listing the file when it is no longer in the | |
100 | manifest. |
|
100 | manifest. | |
101 |
|
101 | |||
102 | If we're merging, and the other revision has removed a file |
|
102 | If we're merging, and the other revision has removed a file | |
103 | that is not present in the working directory, we need to mark it |
|
103 | that is not present in the working directory, we need to mark it | |
104 | as removed. |
|
104 | as removed. | |
105 | """ |
|
105 | """ | |
106 |
|
106 | |||
107 | action = [] |
|
107 | action = [] | |
108 | state = branchmerge and 'r' or 'f' |
|
108 | state = branchmerge and 'r' or 'f' | |
109 | for f in wctx.deleted(): |
|
109 | for f in wctx.deleted(): | |
110 | if f not in mctx: |
|
110 | if f not in mctx: | |
111 | action.append((f, state)) |
|
111 | action.append((f, state)) | |
112 |
|
112 | |||
113 | if not branchmerge: |
|
113 | if not branchmerge: | |
114 | for f in wctx.removed(): |
|
114 | for f in wctx.removed(): | |
115 | if f not in mctx: |
|
115 | if f not in mctx: | |
116 | action.append((f, "f")) |
|
116 | action.append((f, "f")) | |
117 |
|
117 | |||
118 | return action |
|
118 | return action | |
119 |
|
119 | |||
120 | def manifestmerge(repo, p1, p2, pa, overwrite, partial): |
|
120 | def manifestmerge(repo, p1, p2, pa, overwrite, partial): | |
121 | """ |
|
121 | """ | |
122 | Merge p1 and p2 with ancestor ma and generate merge action list |
|
122 | Merge p1 and p2 with ancestor ma and generate merge action list | |
123 |
|
123 | |||
124 | overwrite = whether we clobber working files |
|
124 | overwrite = whether we clobber working files | |
125 | partial = function to filter file lists |
|
125 | partial = function to filter file lists | |
126 | """ |
|
126 | """ | |
127 |
|
127 | |||
128 | def fmerge(f, f2, fa): |
|
128 | def fmerge(f, f2, fa): | |
129 | """merge flags""" |
|
129 | """merge flags""" | |
130 | a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2) |
|
130 | a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2) | |
131 | if m == n: # flags agree |
|
131 | if m == n: # flags agree | |
132 | return m # unchanged |
|
132 | return m # unchanged | |
133 | if m and n and not a: # flags set, don't agree, differ from parent |
|
133 | if m and n and not a: # flags set, don't agree, differ from parent | |
134 | r = repo.ui.prompt( |
|
134 | r = repo.ui.prompt( | |
135 | _(" conflicting flags for %s\n" |
|
135 | _(" conflicting flags for %s\n" | |
136 | "(n)one, e(x)ec or sym(l)ink?") % f, |
|
136 | "(n)one, e(x)ec or sym(l)ink?") % f, | |
137 | (_("&None"), _("E&xec"), _("Sym&link")), _("n")) |
|
137 | (_("&None"), _("E&xec"), _("Sym&link")), _("n")) | |
138 | return r != _("n") and r or '' |
|
138 | return r != _("n") and r or '' | |
139 | if m and m != a: # changed from a to m |
|
139 | if m and m != a: # changed from a to m | |
140 | return m |
|
140 | return m | |
141 | if n and n != a: # changed from a to n |
|
141 | if n and n != a: # changed from a to n | |
142 | return n |
|
142 | return n | |
143 | return '' # flag was cleared |
|
143 | return '' # flag was cleared | |
144 |
|
144 | |||
145 | def act(msg, m, f, *args): |
|
145 | def act(msg, m, f, *args): | |
146 | repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m)) |
|
146 | repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m)) | |
147 | action.append((f, m) + args) |
|
147 | action.append((f, m) + args) | |
148 |
|
148 | |||
149 | action, copy = [], {} |
|
149 | action, copy = [], {} | |
150 |
|
150 | |||
151 | if overwrite: |
|
151 | if overwrite: | |
152 | pa = p1 |
|
152 | pa = p1 | |
153 | elif pa == p2: # backwards |
|
153 | elif pa == p2: # backwards | |
154 | pa = p1.p1() |
|
154 | pa = p1.p1() | |
155 | elif pa and repo.ui.configbool("merge", "followcopies", True): |
|
155 | elif pa and repo.ui.configbool("merge", "followcopies", True): | |
156 | dirs = repo.ui.configbool("merge", "followdirs", True) |
|
156 | dirs = repo.ui.configbool("merge", "followdirs", True) | |
157 | copy, diverge = copies.copies(repo, p1, p2, pa, dirs) |
|
157 | copy, diverge = copies.copies(repo, p1, p2, pa, dirs) | |
158 | for of, fl in diverge.iteritems(): |
|
158 | for of, fl in diverge.iteritems(): | |
159 | act("divergent renames", "dr", of, fl) |
|
159 | act("divergent renames", "dr", of, fl) | |
160 |
|
160 | |||
161 | repo.ui.note(_("resolving manifests\n")) |
|
161 | repo.ui.note(_("resolving manifests\n")) | |
162 | repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial))) |
|
162 | repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial))) | |
163 | repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2)) |
|
163 | repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2)) | |
164 |
|
164 | |||
165 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() |
|
165 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() | |
166 | copied = set(copy.values()) |
|
166 | copied = set(copy.values()) | |
167 |
|
167 | |||
168 | # Compare manifests |
|
168 | # Compare manifests | |
169 | for f, n in m1.iteritems(): |
|
169 | for f, n in m1.iteritems(): | |
170 | if partial and not partial(f): |
|
170 | if partial and not partial(f): | |
171 | continue |
|
171 | continue | |
172 | if f in m2: |
|
172 | if f in m2: | |
173 | rflags = fmerge(f, f, f) |
|
173 | rflags = fmerge(f, f, f) | |
174 | a = ma.get(f, nullid) |
|
174 | a = ma.get(f, nullid) | |
175 | if n == m2[f] or m2[f] == a: # same or local newer |
|
175 | if n == m2[f] or m2[f] == a: # same or local newer | |
176 | if m1.flags(f) != rflags: |
|
176 | if m1.flags(f) != rflags: | |
177 | act("update permissions", "e", f, rflags) |
|
177 | act("update permissions", "e", f, rflags) | |
178 | elif n == a: # remote newer |
|
178 | elif n == a: # remote newer | |
179 | act("remote is newer", "g", f, rflags) |
|
179 | act("remote is newer", "g", f, rflags) | |
180 | else: # both changed |
|
180 | else: # both changed | |
181 | act("versions differ", "m", f, f, f, rflags, False) |
|
181 | act("versions differ", "m", f, f, f, rflags, False) | |
182 | elif f in copied: # files we'll deal with on m2 side |
|
182 | elif f in copied: # files we'll deal with on m2 side | |
183 | pass |
|
183 | pass | |
184 | elif f in copy: |
|
184 | elif f in copy: | |
185 | f2 = copy[f] |
|
185 | f2 = copy[f] | |
186 | if f2 not in m2: # directory rename |
|
186 | if f2 not in m2: # directory rename | |
187 | act("remote renamed directory to " + f2, "d", |
|
187 | act("remote renamed directory to " + f2, "d", | |
188 | f, None, f2, m1.flags(f)) |
|
188 | f, None, f2, m1.flags(f)) | |
189 | else: # case 2 A,B/B/B or case 4,21 A/B/B |
|
189 | else: # case 2 A,B/B/B or case 4,21 A/B/B | |
190 | act("local copied/moved to " + f2, "m", |
|
190 | act("local copied/moved to " + f2, "m", | |
191 | f, f2, f, fmerge(f, f2, f2), False) |
|
191 | f, f2, f, fmerge(f, f2, f2), False) | |
192 | elif f in ma: # clean, a different, no remote |
|
192 | elif f in ma: # clean, a different, no remote | |
193 | if n != ma[f]: |
|
193 | if n != ma[f]: | |
194 | if repo.ui.prompt( |
|
194 | if repo.ui.prompt( | |
195 | _(" local changed %s which remote deleted\n" |
|
195 | _(" local changed %s which remote deleted\n" | |
196 | "use (c)hanged version or (d)elete?") % f, |
|
196 | "use (c)hanged version or (d)elete?") % f, | |
197 | (_("&Changed"), _("&Delete")), _("c")) == _("d"): |
|
197 | (_("&Changed"), _("&Delete")), _("c")) == _("d"): | |
198 | act("prompt delete", "r", f) |
|
198 | act("prompt delete", "r", f) | |
199 | else: |
|
199 | else: | |
200 | act("prompt keep", "a", f) |
|
200 | act("prompt keep", "a", f) | |
201 | elif n[20:] == "a": # added, no remote |
|
201 | elif n[20:] == "a": # added, no remote | |
202 | act("remote deleted", "f", f) |
|
202 | act("remote deleted", "f", f) | |
203 | elif n[20:] != "u": |
|
203 | elif n[20:] != "u": | |
204 | act("other deleted", "r", f) |
|
204 | act("other deleted", "r", f) | |
205 |
|
205 | |||
206 | for f, n in m2.iteritems(): |
|
206 | for f, n in m2.iteritems(): | |
207 | if partial and not partial(f): |
|
207 | if partial and not partial(f): | |
208 | continue |
|
208 | continue | |
209 | if f in m1 or f in copied: # files already visited |
|
209 | if f in m1 or f in copied: # files already visited | |
210 | continue |
|
210 | continue | |
211 | if f in copy: |
|
211 | if f in copy: | |
212 | f2 = copy[f] |
|
212 | f2 = copy[f] | |
213 | if f2 not in m1: # directory rename |
|
213 | if f2 not in m1: # directory rename | |
214 | act("local renamed directory to " + f2, "d", |
|
214 | act("local renamed directory to " + f2, "d", | |
215 | None, f, f2, m2.flags(f)) |
|
215 | None, f, f2, m2.flags(f)) | |
216 | elif f2 in m2: # rename case 1, A/A,B/A |
|
216 | elif f2 in m2: # rename case 1, A/A,B/A | |
217 | act("remote copied to " + f, "m", |
|
217 | act("remote copied to " + f, "m", | |
218 | f2, f, f, fmerge(f2, f, f2), False) |
|
218 | f2, f, f, fmerge(f2, f, f2), False) | |
219 | else: # case 3,20 A/B/A |
|
219 | else: # case 3,20 A/B/A | |
220 | act("remote moved to " + f, "m", |
|
220 | act("remote moved to " + f, "m", | |
221 | f2, f, f, fmerge(f2, f, f2), True) |
|
221 | f2, f, f, fmerge(f2, f, f2), True) | |
222 | elif f not in ma: |
|
222 | elif f not in ma: | |
223 | act("remote created", "g", f, m2.flags(f)) |
|
223 | act("remote created", "g", f, m2.flags(f)) | |
224 | elif n != ma[f]: |
|
224 | elif n != ma[f]: | |
225 | if repo.ui.prompt( |
|
225 | if repo.ui.prompt( | |
226 | _("remote changed %s which local deleted\n" |
|
226 | _("remote changed %s which local deleted\n" | |
227 | "use (c)hanged version or leave (d)eleted?") % f, |
|
227 | "use (c)hanged version or leave (d)eleted?") % f, | |
228 | (_("&Changed"), _("&Deleted")), _("c")) == _("c"): |
|
228 | (_("&Changed"), _("&Deleted")), _("c")) == _("c"): | |
229 | act("prompt recreating", "g", f, m2.flags(f)) |
|
229 | act("prompt recreating", "g", f, m2.flags(f)) | |
230 |
|
230 | |||
231 | return action |
|
231 | return action | |
232 |
|
232 | |||
233 | def actionkey(a): |
|
233 | def actionkey(a): | |
234 | return a[1] == 'r' and -1 or 0, a |
|
234 | return a[1] == 'r' and -1 or 0, a | |
235 |
|
235 | |||
236 | def applyupdates(repo, action, wctx, mctx): |
|
236 | def applyupdates(repo, action, wctx, mctx): | |
237 | "apply the merge action list to the working directory" |
|
237 | "apply the merge action list to the working directory" | |
238 |
|
238 | |||
239 | updated, merged, removed, unresolved = 0, 0, 0, 0 |
|
239 | updated, merged, removed, unresolved = 0, 0, 0, 0 | |
240 | ms = mergestate(repo) |
|
240 | ms = mergestate(repo) | |
241 | ms.reset(wctx.parents()[0].node()) |
|
241 | ms.reset(wctx.parents()[0].node()) | |
242 | moves = [] |
|
242 | moves = [] | |
243 | action.sort(key=actionkey) |
|
243 | action.sort(key=actionkey) | |
|
244 | substate = wctx.substate # prime | |||
244 |
|
245 | |||
245 | # prescan for merges |
|
246 | # prescan for merges | |
246 | for a in action: |
|
247 | for a in action: | |
247 | f, m = a[:2] |
|
248 | f, m = a[:2] | |
248 | if m == 'm': # merge |
|
249 | if m == 'm': # merge | |
249 | f2, fd, flags, move = a[2:] |
|
250 | f2, fd, flags, move = a[2:] | |
|
251 | if f == '.hgsubstate': # merged internally | |||
|
252 | continue | |||
250 | repo.ui.debug(_("preserving %s for resolve of %s\n") % (f, fd)) |
|
253 | repo.ui.debug(_("preserving %s for resolve of %s\n") % (f, fd)) | |
251 | fcl = wctx[f] |
|
254 | fcl = wctx[f] | |
252 | fco = mctx[f2] |
|
255 | fco = mctx[f2] | |
253 | fca = fcl.ancestor(fco) or repo.filectx(f, fileid=nullrev) |
|
256 | fca = fcl.ancestor(fco) or repo.filectx(f, fileid=nullrev) | |
254 | ms.add(fcl, fco, fca, fd, flags) |
|
257 | ms.add(fcl, fco, fca, fd, flags) | |
255 | if f != fd and move: |
|
258 | if f != fd and move: | |
256 | moves.append(f) |
|
259 | moves.append(f) | |
257 |
|
260 | |||
258 | # remove renamed files after safely stored |
|
261 | # remove renamed files after safely stored | |
259 | for f in moves: |
|
262 | for f in moves: | |
260 | if util.lexists(repo.wjoin(f)): |
|
263 | if util.lexists(repo.wjoin(f)): | |
261 | repo.ui.debug(_("removing %s\n") % f) |
|
264 | repo.ui.debug(_("removing %s\n") % f) | |
262 | os.unlink(repo.wjoin(f)) |
|
265 | os.unlink(repo.wjoin(f)) | |
263 |
|
266 | |||
264 | audit_path = util.path_auditor(repo.root) |
|
267 | audit_path = util.path_auditor(repo.root) | |
265 |
|
268 | |||
266 | for a in action: |
|
269 | for a in action: | |
267 | f, m = a[:2] |
|
270 | f, m = a[:2] | |
268 | if f and f[0] == "/": |
|
271 | if f and f[0] == "/": | |
269 | continue |
|
272 | continue | |
270 | if m == "r": # remove |
|
273 | if m == "r": # remove | |
271 | repo.ui.note(_("removing %s\n") % f) |
|
274 | repo.ui.note(_("removing %s\n") % f) | |
272 | audit_path(f) |
|
275 | audit_path(f) | |
|
276 | if f == '.hgsubstate': # subrepo states need updating | |||
|
277 | subrepo.submerge(repo, wctx, mctx, wctx) | |||
273 | try: |
|
278 | try: | |
274 | util.unlink(repo.wjoin(f)) |
|
279 | util.unlink(repo.wjoin(f)) | |
275 | except OSError, inst: |
|
280 | except OSError, inst: | |
276 | if inst.errno != errno.ENOENT: |
|
281 | if inst.errno != errno.ENOENT: | |
277 | repo.ui.warn(_("update failed to remove %s: %s!\n") % |
|
282 | repo.ui.warn(_("update failed to remove %s: %s!\n") % | |
278 | (f, inst.strerror)) |
|
283 | (f, inst.strerror)) | |
279 | removed += 1 |
|
284 | removed += 1 | |
280 | elif m == "m": # merge |
|
285 | elif m == "m": # merge | |
|
286 | if f == '.hgsubstate': # subrepo states need updating | |||
|
287 | subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx)) | |||
|
288 | continue | |||
281 | f2, fd, flags, move = a[2:] |
|
289 | f2, fd, flags, move = a[2:] | |
282 | r = ms.resolve(fd, wctx, mctx) |
|
290 | r = ms.resolve(fd, wctx, mctx) | |
283 | if r > 0: |
|
291 | if r > 0: | |
284 | unresolved += 1 |
|
292 | unresolved += 1 | |
285 | else: |
|
293 | else: | |
286 | if r is None: |
|
294 | if r is None: | |
287 | updated += 1 |
|
295 | updated += 1 | |
288 | else: |
|
296 | else: | |
289 | merged += 1 |
|
297 | merged += 1 | |
290 | util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags) |
|
298 | util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags) | |
291 | if f != fd and move and util.lexists(repo.wjoin(f)): |
|
299 | if f != fd and move and util.lexists(repo.wjoin(f)): | |
292 | repo.ui.debug(_("removing %s\n") % f) |
|
300 | repo.ui.debug(_("removing %s\n") % f) | |
293 | os.unlink(repo.wjoin(f)) |
|
301 | os.unlink(repo.wjoin(f)) | |
294 | elif m == "g": # get |
|
302 | elif m == "g": # get | |
295 | flags = a[2] |
|
303 | flags = a[2] | |
296 | repo.ui.note(_("getting %s\n") % f) |
|
304 | repo.ui.note(_("getting %s\n") % f) | |
297 | t = mctx.filectx(f).data() |
|
305 | t = mctx.filectx(f).data() | |
298 | repo.wwrite(f, t, flags) |
|
306 | repo.wwrite(f, t, flags) | |
299 | updated += 1 |
|
307 | updated += 1 | |
|
308 | if f == '.hgsubstate': # subrepo states need updating | |||
|
309 | subrepo.submerge(repo, wctx, mctx, wctx) | |||
300 | elif m == "d": # directory rename |
|
310 | elif m == "d": # directory rename | |
301 | f2, fd, flags = a[2:] |
|
311 | f2, fd, flags = a[2:] | |
302 | if f: |
|
312 | if f: | |
303 | repo.ui.note(_("moving %s to %s\n") % (f, fd)) |
|
313 | repo.ui.note(_("moving %s to %s\n") % (f, fd)) | |
304 | t = wctx.filectx(f).data() |
|
314 | t = wctx.filectx(f).data() | |
305 | repo.wwrite(fd, t, flags) |
|
315 | repo.wwrite(fd, t, flags) | |
306 | util.unlink(repo.wjoin(f)) |
|
316 | util.unlink(repo.wjoin(f)) | |
307 | if f2: |
|
317 | if f2: | |
308 | repo.ui.note(_("getting %s to %s\n") % (f2, fd)) |
|
318 | repo.ui.note(_("getting %s to %s\n") % (f2, fd)) | |
309 | t = mctx.filectx(f2).data() |
|
319 | t = mctx.filectx(f2).data() | |
310 | repo.wwrite(fd, t, flags) |
|
320 | repo.wwrite(fd, t, flags) | |
311 | updated += 1 |
|
321 | updated += 1 | |
312 | elif m == "dr": # divergent renames |
|
322 | elif m == "dr": # divergent renames | |
313 | fl = a[2] |
|
323 | fl = a[2] | |
314 | repo.ui.warn(_("warning: detected divergent renames of %s to:\n") % f) |
|
324 | repo.ui.warn(_("warning: detected divergent renames of %s to:\n") % f) | |
315 | for nf in fl: |
|
325 | for nf in fl: | |
316 | repo.ui.warn(" %s\n" % nf) |
|
326 | repo.ui.warn(" %s\n" % nf) | |
317 | elif m == "e": # exec |
|
327 | elif m == "e": # exec | |
318 | flags = a[2] |
|
328 | flags = a[2] | |
319 | util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags) |
|
329 | util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags) | |
320 |
|
330 | |||
321 | return updated, merged, removed, unresolved |
|
331 | return updated, merged, removed, unresolved | |
322 |
|
332 | |||
323 | def recordupdates(repo, action, branchmerge): |
|
333 | def recordupdates(repo, action, branchmerge): | |
324 | "record merge actions to the dirstate" |
|
334 | "record merge actions to the dirstate" | |
325 |
|
335 | |||
326 | for a in action: |
|
336 | for a in action: | |
327 | f, m = a[:2] |
|
337 | f, m = a[:2] | |
328 | if m == "r": # remove |
|
338 | if m == "r": # remove | |
329 | if branchmerge: |
|
339 | if branchmerge: | |
330 | repo.dirstate.remove(f) |
|
340 | repo.dirstate.remove(f) | |
331 | else: |
|
341 | else: | |
332 | repo.dirstate.forget(f) |
|
342 | repo.dirstate.forget(f) | |
333 | elif m == "a": # re-add |
|
343 | elif m == "a": # re-add | |
334 | if not branchmerge: |
|
344 | if not branchmerge: | |
335 | repo.dirstate.add(f) |
|
345 | repo.dirstate.add(f) | |
336 | elif m == "f": # forget |
|
346 | elif m == "f": # forget | |
337 | repo.dirstate.forget(f) |
|
347 | repo.dirstate.forget(f) | |
338 | elif m == "e": # exec change |
|
348 | elif m == "e": # exec change | |
339 | repo.dirstate.normallookup(f) |
|
349 | repo.dirstate.normallookup(f) | |
340 | elif m == "g": # get |
|
350 | elif m == "g": # get | |
341 | if branchmerge: |
|
351 | if branchmerge: | |
342 | repo.dirstate.normaldirty(f) |
|
352 | repo.dirstate.normaldirty(f) | |
343 | else: |
|
353 | else: | |
344 | repo.dirstate.normal(f) |
|
354 | repo.dirstate.normal(f) | |
345 | elif m == "m": # merge |
|
355 | elif m == "m": # merge | |
346 | f2, fd, flag, move = a[2:] |
|
356 | f2, fd, flag, move = a[2:] | |
347 | if branchmerge: |
|
357 | if branchmerge: | |
348 | # We've done a branch merge, mark this file as merged |
|
358 | # We've done a branch merge, mark this file as merged | |
349 | # so that we properly record the merger later |
|
359 | # so that we properly record the merger later | |
350 | repo.dirstate.merge(fd) |
|
360 | repo.dirstate.merge(fd) | |
351 | if f != f2: # copy/rename |
|
361 | if f != f2: # copy/rename | |
352 | if move: |
|
362 | if move: | |
353 | repo.dirstate.remove(f) |
|
363 | repo.dirstate.remove(f) | |
354 | if f != fd: |
|
364 | if f != fd: | |
355 | repo.dirstate.copy(f, fd) |
|
365 | repo.dirstate.copy(f, fd) | |
356 | else: |
|
366 | else: | |
357 | repo.dirstate.copy(f2, fd) |
|
367 | repo.dirstate.copy(f2, fd) | |
358 | else: |
|
368 | else: | |
359 | # We've update-merged a locally modified file, so |
|
369 | # We've update-merged a locally modified file, so | |
360 | # we set the dirstate to emulate a normal checkout |
|
370 | # we set the dirstate to emulate a normal checkout | |
361 | # of that file some time in the past. Thus our |
|
371 | # of that file some time in the past. Thus our | |
362 | # merge will appear as a normal local file |
|
372 | # merge will appear as a normal local file | |
363 | # modification. |
|
373 | # modification. | |
364 | repo.dirstate.normallookup(fd) |
|
374 | repo.dirstate.normallookup(fd) | |
365 | if move: |
|
375 | if move: | |
366 | repo.dirstate.forget(f) |
|
376 | repo.dirstate.forget(f) | |
367 | elif m == "d": # directory rename |
|
377 | elif m == "d": # directory rename | |
368 | f2, fd, flag = a[2:] |
|
378 | f2, fd, flag = a[2:] | |
369 | if not f2 and f not in repo.dirstate: |
|
379 | if not f2 and f not in repo.dirstate: | |
370 | # untracked file moved |
|
380 | # untracked file moved | |
371 | continue |
|
381 | continue | |
372 | if branchmerge: |
|
382 | if branchmerge: | |
373 | repo.dirstate.add(fd) |
|
383 | repo.dirstate.add(fd) | |
374 | if f: |
|
384 | if f: | |
375 | repo.dirstate.remove(f) |
|
385 | repo.dirstate.remove(f) | |
376 | repo.dirstate.copy(f, fd) |
|
386 | repo.dirstate.copy(f, fd) | |
377 | if f2: |
|
387 | if f2: | |
378 | repo.dirstate.copy(f2, fd) |
|
388 | repo.dirstate.copy(f2, fd) | |
379 | else: |
|
389 | else: | |
380 | repo.dirstate.normal(fd) |
|
390 | repo.dirstate.normal(fd) | |
381 | if f: |
|
391 | if f: | |
382 | repo.dirstate.forget(f) |
|
392 | repo.dirstate.forget(f) | |
383 |
|
393 | |||
384 | def update(repo, node, branchmerge, force, partial): |
|
394 | def update(repo, node, branchmerge, force, partial): | |
385 | """ |
|
395 | """ | |
386 | Perform a merge between the working directory and the given node |
|
396 | Perform a merge between the working directory and the given node | |
387 |
|
397 | |||
388 | branchmerge = whether to merge between branches |
|
398 | branchmerge = whether to merge between branches | |
389 | force = whether to force branch merging or file overwriting |
|
399 | force = whether to force branch merging or file overwriting | |
390 | partial = a function to filter file lists (dirstate not updated) |
|
400 | partial = a function to filter file lists (dirstate not updated) | |
391 | """ |
|
401 | """ | |
392 |
|
402 | |||
393 | wlock = repo.wlock() |
|
403 | wlock = repo.wlock() | |
394 | try: |
|
404 | try: | |
395 | wc = repo[None] |
|
405 | wc = repo[None] | |
396 | if node is None: |
|
406 | if node is None: | |
397 | # tip of current branch |
|
407 | # tip of current branch | |
398 | try: |
|
408 | try: | |
399 | node = repo.branchtags()[wc.branch()] |
|
409 | node = repo.branchtags()[wc.branch()] | |
400 | except KeyError: |
|
410 | except KeyError: | |
401 | if wc.branch() == "default": # no default branch! |
|
411 | if wc.branch() == "default": # no default branch! | |
402 | node = repo.lookup("tip") # update to tip |
|
412 | node = repo.lookup("tip") # update to tip | |
403 | else: |
|
413 | else: | |
404 | raise util.Abort(_("branch %s not found") % wc.branch()) |
|
414 | raise util.Abort(_("branch %s not found") % wc.branch()) | |
405 | overwrite = force and not branchmerge |
|
415 | overwrite = force and not branchmerge | |
406 | pl = wc.parents() |
|
416 | pl = wc.parents() | |
407 | p1, p2 = pl[0], repo[node] |
|
417 | p1, p2 = pl[0], repo[node] | |
408 | pa = p1.ancestor(p2) |
|
418 | pa = p1.ancestor(p2) | |
409 | fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2) |
|
419 | fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2) | |
410 | fastforward = False |
|
420 | fastforward = False | |
411 |
|
421 | |||
412 | ### check phase |
|
422 | ### check phase | |
413 | if not overwrite and len(pl) > 1: |
|
423 | if not overwrite and len(pl) > 1: | |
414 | raise util.Abort(_("outstanding uncommitted merges")) |
|
424 | raise util.Abort(_("outstanding uncommitted merges")) | |
415 | if branchmerge: |
|
425 | if branchmerge: | |
416 | if pa == p2: |
|
426 | if pa == p2: | |
417 | raise util.Abort(_("can't merge with ancestor")) |
|
427 | raise util.Abort(_("can't merge with ancestor")) | |
418 | elif pa == p1: |
|
428 | elif pa == p1: | |
419 | if p1.branch() != p2.branch(): |
|
429 | if p1.branch() != p2.branch(): | |
420 | fastforward = True |
|
430 | fastforward = True | |
421 | else: |
|
431 | else: | |
422 | raise util.Abort(_("nothing to merge (use 'hg update'" |
|
432 | raise util.Abort(_("nothing to merge (use 'hg update'" | |
423 | " or check 'hg heads')")) |
|
433 | " or check 'hg heads')")) | |
424 | if not force and (wc.files() or wc.deleted()): |
|
434 | if not force and (wc.files() or wc.deleted()): | |
425 | raise util.Abort(_("outstanding uncommitted changes " |
|
435 | raise util.Abort(_("outstanding uncommitted changes " | |
426 | "(use 'hg status' to list changes)")) |
|
436 | "(use 'hg status' to list changes)")) | |
427 | elif not overwrite: |
|
437 | elif not overwrite: | |
428 | if pa == p1 or pa == p2: # linear |
|
438 | if pa == p1 or pa == p2: # linear | |
429 | pass # all good |
|
439 | pass # all good | |
430 | elif p1.branch() == p2.branch(): |
|
440 | elif p1.branch() == p2.branch(): | |
431 | if wc.files() or wc.deleted(): |
|
441 | if wc.files() or wc.deleted(): | |
432 | raise util.Abort(_("crosses branches (use 'hg merge' or " |
|
442 | raise util.Abort(_("crosses branches (use 'hg merge' or " | |
433 | "'hg update -C' to discard changes)")) |
|
443 | "'hg update -C' to discard changes)")) | |
434 | raise util.Abort(_("crosses branches (use 'hg merge' " |
|
444 | raise util.Abort(_("crosses branches (use 'hg merge' " | |
435 | "or 'hg update -C')")) |
|
445 | "or 'hg update -C')")) | |
436 | elif wc.files() or wc.deleted(): |
|
446 | elif wc.files() or wc.deleted(): | |
437 | raise util.Abort(_("crosses named branches (use " |
|
447 | raise util.Abort(_("crosses named branches (use " | |
438 | "'hg update -C' to discard changes)")) |
|
448 | "'hg update -C' to discard changes)")) | |
439 | else: |
|
449 | else: | |
440 | # Allow jumping branches if there are no changes |
|
450 | # Allow jumping branches if there are no changes | |
441 | overwrite = True |
|
451 | overwrite = True | |
442 |
|
452 | |||
443 | ### calculate phase |
|
453 | ### calculate phase | |
444 | action = [] |
|
454 | action = [] | |
445 | if not force: |
|
455 | if not force: | |
446 | _checkunknown(wc, p2) |
|
456 | _checkunknown(wc, p2) | |
447 | if not util.checkcase(repo.path): |
|
457 | if not util.checkcase(repo.path): | |
448 | _checkcollision(p2) |
|
458 | _checkcollision(p2) | |
449 | action += _forgetremoved(wc, p2, branchmerge) |
|
459 | action += _forgetremoved(wc, p2, branchmerge) | |
450 | action += manifestmerge(repo, wc, p2, pa, overwrite, partial) |
|
460 | action += manifestmerge(repo, wc, p2, pa, overwrite, partial) | |
451 |
|
461 | |||
452 | ### apply phase |
|
462 | ### apply phase | |
453 | if not branchmerge: # just jump to the new rev |
|
463 | if not branchmerge: # just jump to the new rev | |
454 | fp1, fp2, xp1, xp2 = fp2, nullid, xp2, '' |
|
464 | fp1, fp2, xp1, xp2 = fp2, nullid, xp2, '' | |
455 | if not partial: |
|
465 | if not partial: | |
456 | repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2) |
|
466 | repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2) | |
457 |
|
467 | |||
458 | stats = applyupdates(repo, action, wc, p2) |
|
468 | stats = applyupdates(repo, action, wc, p2) | |
459 |
|
469 | |||
460 | if not partial: |
|
470 | if not partial: | |
461 | recordupdates(repo, action, branchmerge) |
|
471 | recordupdates(repo, action, branchmerge) | |
462 | repo.dirstate.setparents(fp1, fp2) |
|
472 | repo.dirstate.setparents(fp1, fp2) | |
463 | if not branchmerge and not fastforward: |
|
473 | if not branchmerge and not fastforward: | |
464 | repo.dirstate.setbranch(p2.branch()) |
|
474 | repo.dirstate.setbranch(p2.branch()) | |
465 | repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3]) |
|
475 | repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3]) | |
466 |
|
476 | |||
467 | return stats |
|
477 | return stats | |
468 | finally: |
|
478 | finally: | |
469 | wlock.release() |
|
479 | wlock.release() |
@@ -1,82 +1,178 b'' | |||||
1 | # subrepo.py - sub-repository handling for Mercurial |
|
1 | # subrepo.py - sub-repository handling for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2, incorporated herein by reference. |
|
6 | # GNU General Public License version 2, incorporated herein by reference. | |
7 |
|
7 | |||
8 | import errno, os |
|
8 | import errno, os | |
|
9 | from i18n import _ | |||
9 | import config, util, node, error |
|
10 | import config, util, node, error | |
10 | localrepo = None |
|
11 | localrepo = hg = None | |
11 |
|
12 | |||
12 | nullstate = ('', '') |
|
13 | nullstate = ('', '') | |
13 |
|
14 | |||
14 | def state(ctx): |
|
15 | def state(ctx): | |
15 | p = config.config() |
|
16 | p = config.config() | |
16 | def read(f, sections=None, remap=None): |
|
17 | def read(f, sections=None, remap=None): | |
17 | if f in ctx: |
|
18 | if f in ctx: | |
18 | try: |
|
19 | try: | |
19 | p.parse(f, ctx[f].data(), sections, remap) |
|
20 | p.parse(f, ctx[f].data(), sections, remap) | |
20 | except IOError, err: |
|
21 | except IOError, err: | |
21 | if err.errno != errno.ENOENT: |
|
22 | if err.errno != errno.ENOENT: | |
22 | raise |
|
23 | raise | |
23 | read('.hgsub') |
|
24 | read('.hgsub') | |
24 |
|
25 | |||
25 | rev = {} |
|
26 | rev = {} | |
26 | if '.hgsubstate' in ctx: |
|
27 | if '.hgsubstate' in ctx: | |
27 | try: |
|
28 | try: | |
28 | for l in ctx['.hgsubstate'].data().splitlines(): |
|
29 | for l in ctx['.hgsubstate'].data().splitlines(): | |
29 | revision, path = l.split() |
|
30 | revision, path = l.split() | |
30 | rev[path] = revision |
|
31 | rev[path] = revision | |
31 | except IOError, err: |
|
32 | except IOError, err: | |
32 | if err.errno != errno.ENOENT: |
|
33 | if err.errno != errno.ENOENT: | |
33 | raise |
|
34 | raise | |
34 |
|
35 | |||
35 | state = {} |
|
36 | state = {} | |
36 | for path, src in p[''].items(): |
|
37 | for path, src in p[''].items(): | |
37 | state[path] = (src, rev.get(path, '')) |
|
38 | state[path] = (src, rev.get(path, '')) | |
38 |
|
39 | |||
39 | return state |
|
40 | return state | |
40 |
|
41 | |||
41 | def writestate(repo, state): |
|
42 | def writestate(repo, state): | |
42 | repo.wwrite('.hgsubstate', |
|
43 | repo.wwrite('.hgsubstate', | |
43 | ''.join(['%s %s\n' % (state[s][1], s) |
|
44 | ''.join(['%s %s\n' % (state[s][1], s) | |
44 | for s in sorted(state)]), '') |
|
45 | for s in sorted(state)]), '') | |
45 |
|
46 | |||
|
47 | def submerge(repo, wctx, mctx, actx): | |||
|
48 | if mctx == actx: # backwards? | |||
|
49 | actx = wctx.p1() | |||
|
50 | s1 = wctx.substate | |||
|
51 | s2 = mctx.substate | |||
|
52 | sa = actx.substate | |||
|
53 | sm = {} | |||
|
54 | ||||
|
55 | for s, l in s1.items(): | |||
|
56 | a = sa.get(s, nullstate) | |||
|
57 | if s in s2: | |||
|
58 | r = s2[s] | |||
|
59 | if l == r or r == a: # no change or local is newer | |||
|
60 | sm[s] = l | |||
|
61 | continue | |||
|
62 | elif l == a: # other side changed | |||
|
63 | wctx.sub(s).get(r) | |||
|
64 | sm[s] = r | |||
|
65 | elif l[0] != r[0]: # sources differ | |||
|
66 | if repo.ui.prompt( | |||
|
67 | _(' subrepository sources for %s differ\n' | |||
|
68 | 'use (l)ocal source (%s) or (r)emote source (%s)?' | |||
|
69 | % (s, l[0], r[0]), | |||
|
70 | (_('&Local'), _('&Remote')), _('l'))) == _('r'): | |||
|
71 | wctx.sub(s).get(r) | |||
|
72 | sm[s] = r | |||
|
73 | elif l[1] == a[1]: # local side is unchanged | |||
|
74 | wctx.sub(s).get(r) | |||
|
75 | sm[s] = r | |||
|
76 | else: | |||
|
77 | wctx.sub(s).merge(r) | |||
|
78 | sm[s] = l | |||
|
79 | elif l == a: # remote removed, local unchanged | |||
|
80 | wctx.sub(s).remove() | |||
|
81 | else: | |||
|
82 | if repo.ui.prompt( | |||
|
83 | _(' local changed subrepository %s which remote removed\n' | |||
|
84 | 'use (c)hanged version or (d)elete?' % s, | |||
|
85 | (_('&Changed'), _('&Delete')), _('c'))) == _('d'): | |||
|
86 | wctx.sub(s).remove() | |||
|
87 | ||||
|
88 | for s, r in s2.items(): | |||
|
89 | if s in s1: | |||
|
90 | continue | |||
|
91 | elif s not in sa: | |||
|
92 | wctx.sub(s).get(r) | |||
|
93 | sm[s] = r | |||
|
94 | elif r != sa[s]: | |||
|
95 | if repo.ui.prompt( | |||
|
96 | _(' remote changed subrepository %s which local removed\n' | |||
|
97 | 'use (c)hanged version or (d)elete?' % s, | |||
|
98 | (_('&Changed'), _('&Delete')), _('c'))) == _('c'): | |||
|
99 | wctx.sub(s).get(r) | |||
|
100 | sm[s] = r | |||
|
101 | ||||
|
102 | # record merged .hgsubstate | |||
|
103 | writestate(repo, sm) | |||
|
104 | ||||
|
105 | def _abssource(repo): | |||
|
106 | if hasattr(repo, '_subparent'): | |||
|
107 | source = repo._subsource | |||
|
108 | if source.startswith('/') or '://' in source: | |||
|
109 | return source | |||
|
110 | return os.path.join(_abssource(repo._subparent), repo._subsource) | |||
|
111 | return repo.ui.config('paths', 'default', repo.root) | |||
|
112 | ||||
46 | def subrepo(ctx, path): |
|
113 | def subrepo(ctx, path): | |
47 | # subrepo inherently violates our import layering rules |
|
114 | # subrepo inherently violates our import layering rules | |
48 | # because it wants to make repo objects from deep inside the stack |
|
115 | # because it wants to make repo objects from deep inside the stack | |
49 | # so we manually delay the circular imports to not break |
|
116 | # so we manually delay the circular imports to not break | |
50 | # scripts that don't use our demand-loading |
|
117 | # scripts that don't use our demand-loading | |
51 | global localrepo |
|
118 | global localrepo, hg | |
52 | import localrepo as l |
|
119 | import localrepo as l, hg as h | |
53 | localrepo = l |
|
120 | localrepo = l | |
|
121 | hg = h | |||
54 |
|
122 | |||
55 | state = ctx.substate.get(path, nullstate) |
|
123 | state = ctx.substate.get(path, nullstate) | |
56 | if state[0].startswith('['): # future expansion |
|
124 | if state[0].startswith('['): # future expansion | |
57 | raise error.Abort('unknown subrepo source %s' % state[0]) |
|
125 | raise error.Abort('unknown subrepo source %s' % state[0]) | |
58 | return hgsubrepo(ctx, path, state) |
|
126 | return hgsubrepo(ctx, path, state) | |
59 |
|
127 | |||
60 | class hgsubrepo(object): |
|
128 | class hgsubrepo(object): | |
61 | def __init__(self, ctx, path, state): |
|
129 | def __init__(self, ctx, path, state): | |
62 | self._parent = ctx |
|
130 | self._parent = ctx | |
63 | self._path = path |
|
131 | self._path = path | |
64 | self._state = state |
|
132 | self._state = state | |
65 | r = ctx._repo |
|
133 | r = ctx._repo | |
66 | root = r.wjoin(path) |
|
134 | root = r.wjoin(path) | |
67 | self._repo = localrepo.localrepository(r.ui, root) |
|
135 | if os.path.exists(os.path.join(root, '.hg')): | |
|
136 | self._repo = localrepo.localrepository(r.ui, root) | |||
|
137 | else: | |||
|
138 | util.makedirs(root) | |||
|
139 | self._repo = localrepo.localrepository(r.ui, root, create=True) | |||
|
140 | self._repo._subparent = r | |||
|
141 | self._repo._subsource = state[0] | |||
68 |
|
142 | |||
69 | def dirty(self): |
|
143 | def dirty(self): | |
70 | r = self._state[1] |
|
144 | r = self._state[1] | |
71 | if r == '': |
|
145 | if r == '': | |
72 | return True |
|
146 | return True | |
73 | w = self._repo[None] |
|
147 | w = self._repo[None] | |
74 | if w.p1() != self._repo[r]: # version checked out changed |
|
148 | if w.p1() != self._repo[r]: # version checked out changed | |
75 | return True |
|
149 | return True | |
76 | return w.dirty() # working directory changed |
|
150 | return w.dirty() # working directory changed | |
77 |
|
151 | |||
78 | def commit(self, text, user, date): |
|
152 | def commit(self, text, user, date): | |
79 | n = self._repo.commit(text, user, date) |
|
153 | n = self._repo.commit(text, user, date) | |
80 | if not n: |
|
154 | if not n: | |
81 | return self._repo['.'].hex() # different version checked out |
|
155 | return self._repo['.'].hex() # different version checked out | |
82 | return node.hex(n) |
|
156 | return node.hex(n) | |
|
157 | ||||
|
158 | def remove(self): | |||
|
159 | # we can't fully delete the repository as it may contain | |||
|
160 | # local-only history | |||
|
161 | self._repo.ui.note(_('removing subrepo %s\n') % self._path) | |||
|
162 | hg.clean(self._repo, node.nullid, False) | |||
|
163 | ||||
|
164 | def get(self, state): | |||
|
165 | source, revision = state | |||
|
166 | try: | |||
|
167 | self._repo.lookup(revision) | |||
|
168 | except error.RepoError: | |||
|
169 | self._repo._subsource = source | |||
|
170 | self._repo.ui.status(_('pulling subrepo %s\n') % self._path) | |||
|
171 | srcurl = _abssource(self._repo) | |||
|
172 | other = hg.repository(self._repo.ui, srcurl) | |||
|
173 | self._repo.pull(other) | |||
|
174 | ||||
|
175 | hg.clean(self._repo, revision, False) | |||
|
176 | ||||
|
177 | def merge(self, state): | |||
|
178 | hg.merge(self._repo, state[1], remind=False) |
General Comments 0
You need to be logged in to leave comments.
Login now