Show More
@@ -1,230 +1,230 | |||||
1 | # hg.py - repository classes for mercurial |
|
1 | # hg.py - repository classes for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from node import * |
|
8 | from node import * | |
9 | from repo import * |
|
9 | from repo import * | |
10 | from demandload import * |
|
10 | from demandload import * | |
11 | from i18n import gettext as _ |
|
11 | from i18n import gettext as _ | |
12 | demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo") |
|
12 | demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo") | |
13 | demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify") |
|
13 | demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify") | |
14 |
|
14 | |||
def _local(path):
    """Pick the repository class for a local path.

    A plain file is assumed to be a bundle; anything else (including an
    empty path) is treated as an ordinary local repository.
    """
    # `path and ...` keeps drop_scheme from seeing an empty string.
    stripped = path and util.drop_scheme('file', path)
    if os.path.isfile(stripped):
        return bundlerepo
    return localrepo
18 |
|
18 | |||
# Map a URL scheme to the module handling it.  Values are either a
# repository module (providing instance()/islocal()) or a callable such
# as _local that selects one based on the path.
schemes = {
    'bundle': bundlerepo,
    'file': _local,
    'hg': httprepo,
    'http': httprepo,
    'https': httprepo,
    'old-http': statichttprepo,
    'ssh': sshrepo,
    'static-http': statichttprepo,
    }
29 |
|
29 | |||
def _lookup(path):
    """Return the repository module responsible for *path*.

    The scheme is taken from everything before the first ':'; unknown or
    missing schemes fall back to local file handling.
    """
    scheme = 'file'
    if path:
        sep = path.find(':')
        if sep > 0:
            scheme = path[:sep]
    handler = schemes.get(scheme) or schemes['file']
    # Entries may be callables (e.g. _local) that choose a module, or
    # modules themselves; calling a module raises TypeError.
    try:
        return handler(path)
    except TypeError:
        return handler
41 |
|
41 | |||
def islocal(repo):
    """Return true if *repo* (a repository object or a path/URL) is local."""
    if not isinstance(repo, str):
        # already a repository object; ask it directly
        return repo.local()
    try:
        return _lookup(repo).islocal(repo)
    except AttributeError:
        # the scheme's module has no islocal() -> treat as remote
        return False
50 |
|
50 | |||
def repository(ui, path=None, create=False):
    """Instantiate and return a repository object for *path*."""
    module = _lookup(path)
    return module.instance(ui, path, create)
54 |
|
54 | |||
def defaultdest(source):
    """Return the default clone destination for *source*.

    This is simply the last path component after normalization, so a
    trailing slash does not yield an empty name.
    """
    normalized = os.path.normpath(source)
    return os.path.basename(normalized)
58 |
|
58 | |||
59 | def clone(ui, source, dest=None, pull=False, rev=None, update=True, |
|
59 | def clone(ui, source, dest=None, pull=False, rev=None, update=True, | |
60 | stream=False): |
|
60 | stream=False): | |
61 | """Make a copy of an existing repository. |
|
61 | """Make a copy of an existing repository. | |
62 |
|
62 | |||
63 | Create a copy of an existing repository in a new directory. The |
|
63 | Create a copy of an existing repository in a new directory. The | |
64 | source and destination are URLs, as passed to the repository |
|
64 | source and destination are URLs, as passed to the repository | |
65 | function. Returns a pair of repository objects, the source and |
|
65 | function. Returns a pair of repository objects, the source and | |
66 | newly created destination. |
|
66 | newly created destination. | |
67 |
|
67 | |||
68 | The location of the source is added to the new repository's |
|
68 | The location of the source is added to the new repository's | |
69 | .hg/hgrc file, as the default to be used for future pulls and |
|
69 | .hg/hgrc file, as the default to be used for future pulls and | |
70 | pushes. |
|
70 | pushes. | |
71 |
|
71 | |||
72 | If an exception is raised, the partly cloned/updated destination |
|
72 | If an exception is raised, the partly cloned/updated destination | |
73 | repository will be deleted. |
|
73 | repository will be deleted. | |
74 |
|
74 | |||
75 | Arguments: |
|
75 | Arguments: | |
76 |
|
76 | |||
77 | source: repository object or URL |
|
77 | source: repository object or URL | |
78 |
|
78 | |||
79 | dest: URL of destination repository to create (defaults to base |
|
79 | dest: URL of destination repository to create (defaults to base | |
80 | name of source repository) |
|
80 | name of source repository) | |
81 |
|
81 | |||
82 | pull: always pull from source repository, even in local case |
|
82 | pull: always pull from source repository, even in local case | |
83 |
|
83 | |||
84 | stream: stream raw data uncompressed from repository (fast over |
|
84 | stream: stream raw data uncompressed from repository (fast over | |
85 | LAN, slow over WAN) |
|
85 | LAN, slow over WAN) | |
86 |
|
86 | |||
87 | rev: revision to clone up to (implies pull=True) |
|
87 | rev: revision to clone up to (implies pull=True) | |
88 |
|
88 | |||
89 | update: update working directory after clone completes, if |
|
89 | update: update working directory after clone completes, if | |
90 | destination is local repository |
|
90 | destination is local repository | |
91 | """ |
|
91 | """ | |
92 | if isinstance(source, str): |
|
92 | if isinstance(source, str): | |
93 | src_repo = repository(ui, source) |
|
93 | src_repo = repository(ui, source) | |
94 | else: |
|
94 | else: | |
95 | src_repo = source |
|
95 | src_repo = source | |
96 | source = src_repo.url() |
|
96 | source = src_repo.url() | |
97 |
|
97 | |||
98 | if dest is None: |
|
98 | if dest is None: | |
99 | dest = defaultdest(source) |
|
99 | dest = defaultdest(source) | |
100 |
|
100 | |||
101 | def localpath(path): |
|
101 | def localpath(path): | |
102 | if path.startswith('file://'): |
|
102 | if path.startswith('file://'): | |
103 | return path[7:] |
|
103 | return path[7:] | |
104 | if path.startswith('file:'): |
|
104 | if path.startswith('file:'): | |
105 | return path[5:] |
|
105 | return path[5:] | |
106 | return path |
|
106 | return path | |
107 |
|
107 | |||
108 | dest = localpath(dest) |
|
108 | dest = localpath(dest) | |
109 | source = localpath(source) |
|
109 | source = localpath(source) | |
110 |
|
110 | |||
111 | if os.path.exists(dest): |
|
111 | if os.path.exists(dest): | |
112 | raise util.Abort(_("destination '%s' already exists"), dest) |
|
112 | raise util.Abort(_("destination '%s' already exists"), dest) | |
113 |
|
113 | |||
114 | class DirCleanup(object): |
|
114 | class DirCleanup(object): | |
115 | def __init__(self, dir_): |
|
115 | def __init__(self, dir_): | |
116 | self.rmtree = shutil.rmtree |
|
116 | self.rmtree = shutil.rmtree | |
117 | self.dir_ = dir_ |
|
117 | self.dir_ = dir_ | |
118 | def close(self): |
|
118 | def close(self): | |
119 | self.dir_ = None |
|
119 | self.dir_ = None | |
120 | def __del__(self): |
|
120 | def __del__(self): | |
121 | if self.dir_: |
|
121 | if self.dir_: | |
122 | self.rmtree(self.dir_, True) |
|
122 | self.rmtree(self.dir_, True) | |
123 |
|
123 | |||
124 | dest_repo = None |
|
124 | dest_repo = None | |
125 | try: |
|
125 | try: | |
126 | dest_repo = repository(ui, dest) |
|
126 | dest_repo = repository(ui, dest) | |
127 | raise util.Abort(_("destination '%s' already exists." % dest)) |
|
127 | raise util.Abort(_("destination '%s' already exists." % dest)) | |
128 | except RepoError: |
|
128 | except RepoError: | |
129 | dest_repo = repository(ui, dest, create=True) |
|
129 | dest_repo = repository(ui, dest, create=True) | |
130 |
|
130 | |||
131 | dest_path = None |
|
131 | dest_path = None | |
132 | dir_cleanup = None |
|
132 | dir_cleanup = None | |
133 | if dest_repo.local(): |
|
133 | if dest_repo.local(): | |
134 | dest_path = os.path.realpath(dest_repo.root) |
|
134 | dest_path = os.path.realpath(dest_repo.root) | |
135 | dir_cleanup = DirCleanup(dest_path) |
|
135 | dir_cleanup = DirCleanup(dest_path) | |
136 |
|
136 | |||
137 | abspath = source |
|
137 | abspath = source | |
138 | copy = False |
|
138 | copy = False | |
139 | if src_repo.local() and dest_repo.local(): |
|
139 | if src_repo.local() and dest_repo.local(): | |
140 | abspath = os.path.abspath(source) |
|
140 | abspath = os.path.abspath(source) | |
141 | copy = not pull and not rev |
|
141 | copy = not pull and not rev | |
142 |
|
142 | |||
143 | src_lock, dest_lock = None, None |
|
143 | src_lock, dest_lock = None, None | |
144 | if copy: |
|
144 | if copy: | |
145 | try: |
|
145 | try: | |
146 | # we use a lock here because if we race with commit, we |
|
146 | # we use a lock here because if we race with commit, we | |
147 | # can end up with extra data in the cloned revlogs that's |
|
147 | # can end up with extra data in the cloned revlogs that's | |
148 | # not pointed to by changesets, thus causing verify to |
|
148 | # not pointed to by changesets, thus causing verify to | |
149 | # fail |
|
149 | # fail | |
150 | src_lock = src_repo.lock() |
|
150 | src_lock = src_repo.lock() | |
151 | except lock.LockException: |
|
151 | except lock.LockException: | |
152 | copy = False |
|
152 | copy = False | |
153 |
|
153 | |||
154 | if copy: |
|
154 | if copy: | |
155 | # we lock here to avoid premature writing to the target |
|
155 | # we lock here to avoid premature writing to the target | |
156 | dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock")) |
|
156 | dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock")) | |
157 |
|
157 | |||
158 | # we need to remove the (empty) data dir in dest so copyfiles |
|
158 | # we need to remove the (empty) data dir in dest so copyfiles | |
159 | # can do its work |
|
159 | # can do its work | |
160 | os.rmdir(os.path.join(dest_path, ".hg", "data")) |
|
160 | os.rmdir(os.path.join(dest_path, ".hg", "data")) | |
161 | files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i" |
|
161 | files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i" | |
162 | for f in files.split(): |
|
162 | for f in files.split(): | |
163 | src = os.path.join(source, ".hg", f) |
|
163 | src = os.path.join(source, ".hg", f) | |
164 | dst = os.path.join(dest_path, ".hg", f) |
|
164 | dst = os.path.join(dest_path, ".hg", f) | |
165 | try: |
|
165 | try: | |
166 | util.copyfiles(src, dst) |
|
166 | util.copyfiles(src, dst) | |
167 | except OSError, inst: |
|
167 | except OSError, inst: | |
168 | if inst.errno != errno.ENOENT: |
|
168 | if inst.errno != errno.ENOENT: | |
169 | raise |
|
169 | raise | |
170 |
|
170 | |||
171 | # we need to re-init the repo after manually copying the data |
|
171 | # we need to re-init the repo after manually copying the data | |
172 | # into it |
|
172 | # into it | |
173 | dest_repo = repository(ui, dest) |
|
173 | dest_repo = repository(ui, dest) | |
174 |
|
174 | |||
175 | else: |
|
175 | else: | |
176 | revs = None |
|
176 | revs = None | |
177 | if rev: |
|
177 | if rev: | |
178 | if not src_repo.local(): |
|
178 | if not src_repo.local(): | |
179 | raise util.Abort(_("clone by revision not supported yet " |
|
179 | raise util.Abort(_("clone by revision not supported yet " | |
180 | "for remote repositories")) |
|
180 | "for remote repositories")) | |
181 | revs = [src_repo.lookup(r) for r in rev] |
|
181 | revs = [src_repo.lookup(r) for r in rev] | |
182 |
|
182 | |||
183 | if dest_repo.local(): |
|
183 | if dest_repo.local(): | |
184 | dest_repo.clone(src_repo, heads=revs, stream=stream) |
|
184 | dest_repo.clone(src_repo, heads=revs, stream=stream) | |
185 | elif src_repo.local(): |
|
185 | elif src_repo.local(): | |
186 | src_repo.push(dest_repo, revs=revs) |
|
186 | src_repo.push(dest_repo, revs=revs) | |
187 | else: |
|
187 | else: | |
188 | raise util.Abort(_("clone from remote to remote not supported")) |
|
188 | raise util.Abort(_("clone from remote to remote not supported")) | |
189 |
|
189 | |||
190 | if src_lock: |
|
190 | if src_lock: | |
191 | src_lock.release() |
|
191 | src_lock.release() | |
192 |
|
192 | |||
193 | if dest_repo.local(): |
|
193 | if dest_repo.local(): | |
194 | fp = dest_repo.opener("hgrc", "w", text=True) |
|
194 | fp = dest_repo.opener("hgrc", "w", text=True) | |
195 | fp.write("[paths]\n") |
|
195 | fp.write("[paths]\n") | |
196 | fp.write("default = %s\n" % abspath) |
|
196 | fp.write("default = %s\n" % abspath) | |
197 | fp.close() |
|
197 | fp.close() | |
198 |
|
198 | |||
199 | if dest_lock: |
|
199 | if dest_lock: | |
200 | dest_lock.release() |
|
200 | dest_lock.release() | |
201 |
|
201 | |||
202 | if update: |
|
202 | if update: | |
203 | _merge.update(dest_repo, dest_repo.changelog.tip()) |
|
203 | _merge.update(dest_repo, dest_repo.changelog.tip()) | |
204 | if dir_cleanup: |
|
204 | if dir_cleanup: | |
205 | dir_cleanup.close() |
|
205 | dir_cleanup.close() | |
206 |
|
206 | |||
207 | return src_repo, dest_repo |
|
207 | return src_repo, dest_repo | |
208 |
|
208 | |||
def update(repo, node):
    """Update the working directory to *node*, merging linear changes.

    Thin convenience wrapper around the merge machinery.
    """
    return _merge.update(repo, node)
212 |
|
212 | |||
def clean(repo, node, wlock=None, show_stats=True):
    """Forcibly switch the working directory to *node*, clobbering changes."""
    return _merge.update(repo, node, force=True, wlock=wlock,
                         show_stats=show_stats)
217 |
|
217 | |||
def merge(repo, node, force=None, remind=True, wlock=None):
    """Branch merge with *node*, resolving file-level changes."""
    return _merge.update(repo, node, branchmerge=True, forcemerge=force,
                         remind=remind, wlock=wlock)
222 |
|
222 | |||
def revert(repo, node, choose):
    """Revert files selected by *choose* to their state at *node*.

    Only file contents change; the dirstate parents are not updated.
    """
    return _merge.update(repo, node, force=True, partial=choose,
                         show_stats=False)
227 |
|
227 | |||
def verify(repo):
    """Verify the consistency of a repository."""
    return _verify.verify(repo)
@@ -1,347 +1,345 | |||||
1 | # merge.py - directory-level update/merge handling for Mercurial |
|
1 | # merge.py - directory-level update/merge handling for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from node import * |
|
8 | from node import * | |
9 | from i18n import gettext as _ |
|
9 | from i18n import gettext as _ | |
10 | from demandload import * |
|
10 | from demandload import * | |
11 | demandload(globals(), "util os tempfile") |
|
11 | demandload(globals(), "util os tempfile") | |
12 |
|
12 | |||
def merge3(repo, fn, my, other, p1, p2):
    """Run a three-way merge on *fn* in the working directory.

    Snapshots the base and other revisions into temp files, invokes the
    external merge tool on them, and returns its exit status (0 means
    the merge succeeded).
    """
    fl = repo.file(fn)
    base = fl.ancestor(my, other)

    def snapshot(tag, node):
        # dump revision *node* of fn into a temp file; caller unlinks it
        pre = "%s~%s." % (os.path.basename(fn), tag)
        fd, path = tempfile.mkstemp(prefix=pre)
        out = os.fdopen(fd, "wb")
        repo.wwrite(fn, fl.read(node), out)
        out.close()
        return path

    localfile = repo.wjoin(fn)
    basefile = snapshot("base", base)
    otherfile = snapshot("other", other)

    repo.ui.note(_("resolving %s\n") % fn)
    repo.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
                  (fn, short(my), short(other), short(base)))

    # tool resolution: environment beats config beats the default script
    tool = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
            or "hgmerge")
    status = util.system('%s "%s" "%s" "%s"' %
                         (tool, localfile, basefile, otherfile),
                         cwd=repo.root,
                         environ={'HG_FILE': fn,
                                  'HG_MY_NODE': p1,
                                  'HG_OTHER_NODE': p2,
                                  'HG_FILE_MY_NODE': hex(my),
                                  'HG_FILE_OTHER_NODE': hex(other),
                                  'HG_FILE_BASE_NODE': hex(base)})
    if status:
        repo.ui.warn(_("merging %s failed!\n") % fn)

    os.unlink(basefile)
    os.unlink(otherfile)
    return status
49 |
|
49 | |||
50 |
def update(repo, node, branchmerge=False, force=False, |
|
50 | def update(repo, node, branchmerge=False, force=False, partial=None, | |
51 |
|
|
51 | forcemerge=False, wlock=None, show_stats=True, remind=True): | |
52 | remind=True): |
|
|||
53 | pl = repo.dirstate.parents() |
|
52 | pl = repo.dirstate.parents() | |
54 | if not force and pl[1] != nullid: |
|
53 | if not force and pl[1] != nullid: | |
55 | raise util.Abort(_("outstanding uncommitted merges")) |
|
54 | raise util.Abort(_("outstanding uncommitted merges")) | |
56 |
|
55 | |||
57 | err = False |
|
56 | err = False | |
58 |
|
57 | |||
59 | p1, p2 = pl[0], node |
|
58 | p1, p2 = pl[0], node | |
60 | pa = repo.changelog.ancestor(p1, p2) |
|
59 | pa = repo.changelog.ancestor(p1, p2) | |
61 | m1n = repo.changelog.read(p1)[0] |
|
60 | m1n = repo.changelog.read(p1)[0] | |
62 | m2n = repo.changelog.read(p2)[0] |
|
61 | m2n = repo.changelog.read(p2)[0] | |
63 | man = repo.manifest.ancestor(m1n, m2n) |
|
62 | man = repo.manifest.ancestor(m1n, m2n) | |
64 | m1 = repo.manifest.read(m1n) |
|
63 | m1 = repo.manifest.read(m1n) | |
65 | mf1 = repo.manifest.readflags(m1n) |
|
64 | mf1 = repo.manifest.readflags(m1n) | |
66 | m2 = repo.manifest.read(m2n).copy() |
|
65 | m2 = repo.manifest.read(m2n).copy() | |
67 | mf2 = repo.manifest.readflags(m2n) |
|
66 | mf2 = repo.manifest.readflags(m2n) | |
68 | ma = repo.manifest.read(man) |
|
67 | ma = repo.manifest.read(man) | |
69 | mfa = repo.manifest.readflags(man) |
|
68 | mfa = repo.manifest.readflags(man) | |
70 |
|
69 | |||
71 | modified, added, removed, deleted, unknown = repo.changes() |
|
70 | modified, added, removed, deleted, unknown = repo.changes() | |
72 |
|
71 | |||
73 | # is this a jump, or a merge? i.e. is there a linear path |
|
72 | # is this a jump, or a merge? i.e. is there a linear path | |
74 | # from p1 to p2? |
|
73 | # from p1 to p2? | |
75 | linear_path = (pa == p1 or pa == p2) |
|
74 | linear_path = (pa == p1 or pa == p2) | |
76 |
|
75 | |||
77 | if branchmerge and linear_path: |
|
76 | if branchmerge and linear_path: | |
78 | raise util.Abort(_("there is nothing to merge, just use " |
|
77 | raise util.Abort(_("there is nothing to merge, just use " | |
79 | "'hg update' or look at 'hg heads'")) |
|
78 | "'hg update' or look at 'hg heads'")) | |
80 | if branchmerge and not forcemerge: |
|
79 | if branchmerge and not forcemerge: | |
81 | if modified or added or removed: |
|
80 | if modified or added or removed: | |
82 | raise util.Abort(_("outstanding uncommitted changes")) |
|
81 | raise util.Abort(_("outstanding uncommitted changes")) | |
83 |
|
82 | |||
84 | if not forcemerge and not force: |
|
83 | if not forcemerge and not force: | |
85 | for f in unknown: |
|
84 | for f in unknown: | |
86 | if f in m2: |
|
85 | if f in m2: | |
87 | t1 = repo.wread(f) |
|
86 | t1 = repo.wread(f) | |
88 | t2 = repo.file(f).read(m2[f]) |
|
87 | t2 = repo.file(f).read(m2[f]) | |
89 | if cmp(t1, t2) != 0: |
|
88 | if cmp(t1, t2) != 0: | |
90 | raise util.Abort(_("'%s' already exists in the working" |
|
89 | raise util.Abort(_("'%s' already exists in the working" | |
91 | " dir and differs from remote") % f) |
|
90 | " dir and differs from remote") % f) | |
92 |
|
91 | |||
93 | # resolve the manifest to determine which files |
|
92 | # resolve the manifest to determine which files | |
94 | # we care about merging |
|
93 | # we care about merging | |
95 | repo.ui.note(_("resolving manifests\n")) |
|
94 | repo.ui.note(_("resolving manifests\n")) | |
96 |
repo.ui.debug(_(" force %s branchmerge %s |
|
95 | repo.ui.debug(_(" force %s branchmerge %s partial %s linear %s\n") % | |
97 |
(force, branchmerge, |
|
96 | (force, branchmerge, partial and True or False, linear_path)) | |
98 | repo.ui.debug(_(" ancestor %s local %s remote %s\n") % |
|
97 | repo.ui.debug(_(" ancestor %s local %s remote %s\n") % | |
99 | (short(man), short(m1n), short(m2n))) |
|
98 | (short(man), short(m1n), short(m2n))) | |
100 |
|
99 | |||
101 | merge = {} |
|
100 | merge = {} | |
102 | get = {} |
|
101 | get = {} | |
103 | remove = [] |
|
102 | remove = [] | |
104 |
|
103 | |||
105 | # construct a working dir manifest |
|
104 | # construct a working dir manifest | |
106 | mw = m1.copy() |
|
105 | mw = m1.copy() | |
107 | mfw = mf1.copy() |
|
106 | mfw = mf1.copy() | |
108 | umap = dict.fromkeys(unknown) |
|
107 | umap = dict.fromkeys(unknown) | |
109 |
|
108 | |||
110 | for f in added + modified + unknown: |
|
109 | for f in added + modified + unknown: | |
111 | mw[f] = "" |
|
110 | mw[f] = "" | |
112 | mfw[f] = util.is_exec(repo.wjoin(f), mfw.get(f, False)) |
|
111 | mfw[f] = util.is_exec(repo.wjoin(f), mfw.get(f, False)) | |
113 |
|
112 | |||
114 | if moddirstate and not wlock: |
|
113 | if not partial and not wlock: wlock = repo.wlock() | |
115 | wlock = repo.wlock() |
|
|||
116 |
|
114 | |||
117 | for f in deleted + removed: |
|
115 | for f in deleted + removed: | |
118 | if f in mw: |
|
116 | if f in mw: | |
119 | del mw[f] |
|
117 | del mw[f] | |
120 |
|
118 | |||
121 | # If we're jumping between revisions (as opposed to merging), |
|
119 | # If we're jumping between revisions (as opposed to merging), | |
122 | # and if neither the working directory nor the target rev has |
|
120 | # and if neither the working directory nor the target rev has | |
123 | # the file, then we need to remove it from the dirstate, to |
|
121 | # the file, then we need to remove it from the dirstate, to | |
124 | # prevent the dirstate from listing the file when it is no |
|
122 | # prevent the dirstate from listing the file when it is no | |
125 | # longer in the manifest. |
|
123 | # longer in the manifest. | |
126 |
if |
|
124 | if not partial and linear_path and f not in m2: | |
127 | repo.dirstate.forget((f,)) |
|
125 | repo.dirstate.forget((f,)) | |
128 |
|
126 | |||
129 | # Compare manifests |
|
127 | # Compare manifests | |
130 | for f, n in mw.iteritems(): |
|
128 | for f, n in mw.iteritems(): | |
131 |
if |
|
129 | if partial and not partial(f): | |
132 | continue |
|
130 | continue | |
133 | if f in m2: |
|
131 | if f in m2: | |
134 | s = 0 |
|
132 | s = 0 | |
135 |
|
133 | |||
136 | # is the wfile new since m1, and match m2? |
|
134 | # is the wfile new since m1, and match m2? | |
137 | if f not in m1: |
|
135 | if f not in m1: | |
138 | t1 = repo.wread(f) |
|
136 | t1 = repo.wread(f) | |
139 | t2 = repo.file(f).read(m2[f]) |
|
137 | t2 = repo.file(f).read(m2[f]) | |
140 | if cmp(t1, t2) == 0: |
|
138 | if cmp(t1, t2) == 0: | |
141 | n = m2[f] |
|
139 | n = m2[f] | |
142 | del t1, t2 |
|
140 | del t1, t2 | |
143 |
|
141 | |||
144 | # are files different? |
|
142 | # are files different? | |
145 | if n != m2[f]: |
|
143 | if n != m2[f]: | |
146 | a = ma.get(f, nullid) |
|
144 | a = ma.get(f, nullid) | |
147 | # are both different from the ancestor? |
|
145 | # are both different from the ancestor? | |
148 | if n != a and m2[f] != a: |
|
146 | if n != a and m2[f] != a: | |
149 | repo.ui.debug(_(" %s versions differ, resolve\n") % f) |
|
147 | repo.ui.debug(_(" %s versions differ, resolve\n") % f) | |
150 | # merge executable bits |
|
148 | # merge executable bits | |
151 | # "if we changed or they changed, change in merge" |
|
149 | # "if we changed or they changed, change in merge" | |
152 | a, b, c = mfa.get(f, 0), mfw[f], mf2[f] |
|
150 | a, b, c = mfa.get(f, 0), mfw[f], mf2[f] | |
153 | mode = ((a^b) | (a^c)) ^ a |
|
151 | mode = ((a^b) | (a^c)) ^ a | |
154 | merge[f] = (m1.get(f, nullid), m2[f], mode) |
|
152 | merge[f] = (m1.get(f, nullid), m2[f], mode) | |
155 | s = 1 |
|
153 | s = 1 | |
156 | # are we clobbering? |
|
154 | # are we clobbering? | |
157 | # is remote's version newer? |
|
155 | # is remote's version newer? | |
158 | # or are we going back in time? |
|
156 | # or are we going back in time? | |
159 | elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]): |
|
157 | elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]): | |
160 | repo.ui.debug(_(" remote %s is newer, get\n") % f) |
|
158 | repo.ui.debug(_(" remote %s is newer, get\n") % f) | |
161 | get[f] = m2[f] |
|
159 | get[f] = m2[f] | |
162 | s = 1 |
|
160 | s = 1 | |
163 | elif f in umap or f in added: |
|
161 | elif f in umap or f in added: | |
164 | # this unknown file is the same as the checkout |
|
162 | # this unknown file is the same as the checkout | |
165 | # we need to reset the dirstate if the file was added |
|
163 | # we need to reset the dirstate if the file was added | |
166 | get[f] = m2[f] |
|
164 | get[f] = m2[f] | |
167 |
|
165 | |||
168 | if not s and mfw[f] != mf2[f]: |
|
166 | if not s and mfw[f] != mf2[f]: | |
169 | if force: |
|
167 | if force: | |
170 | repo.ui.debug(_(" updating permissions for %s\n") % f) |
|
168 | repo.ui.debug(_(" updating permissions for %s\n") % f) | |
171 | util.set_exec(repo.wjoin(f), mf2[f]) |
|
169 | util.set_exec(repo.wjoin(f), mf2[f]) | |
172 | else: |
|
170 | else: | |
173 | a, b, c = mfa.get(f, 0), mfw[f], mf2[f] |
|
171 | a, b, c = mfa.get(f, 0), mfw[f], mf2[f] | |
174 | mode = ((a^b) | (a^c)) ^ a |
|
172 | mode = ((a^b) | (a^c)) ^ a | |
175 | if mode != b: |
|
173 | if mode != b: | |
176 | repo.ui.debug(_(" updating permissions for %s\n") |
|
174 | repo.ui.debug(_(" updating permissions for %s\n") | |
177 | % f) |
|
175 | % f) | |
178 | util.set_exec(repo.wjoin(f), mode) |
|
176 | util.set_exec(repo.wjoin(f), mode) | |
179 | del m2[f] |
|
177 | del m2[f] | |
180 | elif f in ma: |
|
178 | elif f in ma: | |
181 | if n != ma[f]: |
|
179 | if n != ma[f]: | |
182 | r = _("d") |
|
180 | r = _("d") | |
183 | if not force and (linear_path or branchmerge): |
|
181 | if not force and (linear_path or branchmerge): | |
184 | r = repo.ui.prompt( |
|
182 | r = repo.ui.prompt( | |
185 | (_(" local changed %s which remote deleted\n") % f) + |
|
183 | (_(" local changed %s which remote deleted\n") % f) + | |
186 | _("(k)eep or (d)elete?"), _("[kd]"), _("k")) |
|
184 | _("(k)eep or (d)elete?"), _("[kd]"), _("k")) | |
187 | if r == _("d"): |
|
185 | if r == _("d"): | |
188 | remove.append(f) |
|
186 | remove.append(f) | |
189 | else: |
|
187 | else: | |
190 | repo.ui.debug(_("other deleted %s\n") % f) |
|
188 | repo.ui.debug(_("other deleted %s\n") % f) | |
191 | remove.append(f) # other deleted it |
|
189 | remove.append(f) # other deleted it | |
192 | else: |
|
190 | else: | |
193 | # file is created on branch or in working directory |
|
191 | # file is created on branch or in working directory | |
194 | if force and f not in umap: |
|
192 | if force and f not in umap: | |
195 | repo.ui.debug(_("remote deleted %s, clobbering\n") % f) |
|
193 | repo.ui.debug(_("remote deleted %s, clobbering\n") % f) | |
196 | remove.append(f) |
|
194 | remove.append(f) | |
197 | elif n == m1.get(f, nullid): # same as parent |
|
195 | elif n == m1.get(f, nullid): # same as parent | |
198 | if p2 == pa: # going backwards? |
|
196 | if p2 == pa: # going backwards? | |
199 | repo.ui.debug(_("remote deleted %s\n") % f) |
|
197 | repo.ui.debug(_("remote deleted %s\n") % f) | |
200 | remove.append(f) |
|
198 | remove.append(f) | |
201 | else: |
|
199 | else: | |
202 | repo.ui.debug(_("local modified %s, keeping\n") % f) |
|
200 | repo.ui.debug(_("local modified %s, keeping\n") % f) | |
203 | else: |
|
201 | else: | |
204 | repo.ui.debug(_("working dir created %s, keeping\n") % f) |
|
202 | repo.ui.debug(_("working dir created %s, keeping\n") % f) | |
205 |
|
203 | |||
206 | for f, n in m2.iteritems(): |
|
204 | for f, n in m2.iteritems(): | |
207 |
if |
|
205 | if partial and not partial(f): | |
208 | continue |
|
206 | continue | |
209 | if f[0] == "/": |
|
207 | if f[0] == "/": | |
210 | continue |
|
208 | continue | |
211 | if f in ma and n != ma[f]: |
|
209 | if f in ma and n != ma[f]: | |
212 | r = _("k") |
|
210 | r = _("k") | |
213 | if not force and (linear_path or branchmerge): |
|
211 | if not force and (linear_path or branchmerge): | |
214 | r = repo.ui.prompt( |
|
212 | r = repo.ui.prompt( | |
215 | (_("remote changed %s which local deleted\n") % f) + |
|
213 | (_("remote changed %s which local deleted\n") % f) + | |
216 | _("(k)eep or (d)elete?"), _("[kd]"), _("k")) |
|
214 | _("(k)eep or (d)elete?"), _("[kd]"), _("k")) | |
217 | if r == _("k"): |
|
215 | if r == _("k"): | |
218 | get[f] = n |
|
216 | get[f] = n | |
219 | elif f not in ma: |
|
217 | elif f not in ma: | |
220 | repo.ui.debug(_("remote created %s\n") % f) |
|
218 | repo.ui.debug(_("remote created %s\n") % f) | |
221 | get[f] = n |
|
219 | get[f] = n | |
222 | else: |
|
220 | else: | |
223 | if force or p2 == pa: # going backwards? |
|
221 | if force or p2 == pa: # going backwards? | |
224 | repo.ui.debug(_("local deleted %s, recreating\n") % f) |
|
222 | repo.ui.debug(_("local deleted %s, recreating\n") % f) | |
225 | get[f] = n |
|
223 | get[f] = n | |
226 | else: |
|
224 | else: | |
227 | repo.ui.debug(_("local deleted %s\n") % f) |
|
225 | repo.ui.debug(_("local deleted %s\n") % f) | |
228 |
|
226 | |||
229 | del mw, m1, m2, ma |
|
227 | del mw, m1, m2, ma | |
230 |
|
228 | |||
231 | if force: |
|
229 | if force: | |
232 | for f in merge: |
|
230 | for f in merge: | |
233 | get[f] = merge[f][1] |
|
231 | get[f] = merge[f][1] | |
234 | merge = {} |
|
232 | merge = {} | |
235 |
|
233 | |||
236 | if linear_path or force: |
|
234 | if linear_path or force: | |
237 | # we don't need to do any magic, just jump to the new rev |
|
235 | # we don't need to do any magic, just jump to the new rev | |
238 | p1, p2 = p2, nullid |
|
236 | p1, p2 = p2, nullid | |
239 | else: |
|
237 | else: | |
240 | if not branchmerge: |
|
238 | if not branchmerge: | |
241 | repo.ui.status(_("this update spans a branch" |
|
239 | repo.ui.status(_("this update spans a branch" | |
242 | " affecting the following files:\n")) |
|
240 | " affecting the following files:\n")) | |
243 | fl = merge.keys() + get.keys() |
|
241 | fl = merge.keys() + get.keys() | |
244 | fl.sort() |
|
242 | fl.sort() | |
245 | for f in fl: |
|
243 | for f in fl: | |
246 | cf = "" |
|
244 | cf = "" | |
247 | if f in merge: |
|
245 | if f in merge: | |
248 | cf = _(" (resolve)") |
|
246 | cf = _(" (resolve)") | |
249 | repo.ui.status(" %s%s\n" % (f, cf)) |
|
247 | repo.ui.status(" %s%s\n" % (f, cf)) | |
250 | repo.ui.warn(_("aborting update spanning branches!\n")) |
|
248 | repo.ui.warn(_("aborting update spanning branches!\n")) | |
251 | repo.ui.status(_("(use 'hg merge' to merge across branches" |
|
249 | repo.ui.status(_("(use 'hg merge' to merge across branches" | |
252 | " or 'hg update -C' to lose changes)\n")) |
|
250 | " or 'hg update -C' to lose changes)\n")) | |
253 | return 1 |
|
251 | return 1 | |
254 |
|
252 | |||
255 | xp1 = hex(p1) |
|
253 | xp1 = hex(p1) | |
256 | xp2 = hex(p2) |
|
254 | xp2 = hex(p2) | |
257 | if p2 == nullid: xxp2 = '' |
|
255 | if p2 == nullid: xxp2 = '' | |
258 | else: xxp2 = xp2 |
|
256 | else: xxp2 = xp2 | |
259 |
|
257 | |||
260 | repo.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2) |
|
258 | repo.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2) | |
261 |
|
259 | |||
262 | # get the files we don't need to change |
|
260 | # get the files we don't need to change | |
263 | files = get.keys() |
|
261 | files = get.keys() | |
264 | files.sort() |
|
262 | files.sort() | |
265 | for f in files: |
|
263 | for f in files: | |
266 | if f[0] == "/": |
|
264 | if f[0] == "/": | |
267 | continue |
|
265 | continue | |
268 | repo.ui.note(_("getting %s\n") % f) |
|
266 | repo.ui.note(_("getting %s\n") % f) | |
269 | t = repo.file(f).read(get[f]) |
|
267 | t = repo.file(f).read(get[f]) | |
270 | repo.wwrite(f, t) |
|
268 | repo.wwrite(f, t) | |
271 | util.set_exec(repo.wjoin(f), mf2[f]) |
|
269 | util.set_exec(repo.wjoin(f), mf2[f]) | |
272 |
if |
|
270 | if not partial: | |
273 | if branchmerge: |
|
271 | if branchmerge: | |
274 | repo.dirstate.update([f], 'n', st_mtime=-1) |
|
272 | repo.dirstate.update([f], 'n', st_mtime=-1) | |
275 | else: |
|
273 | else: | |
276 | repo.dirstate.update([f], 'n') |
|
274 | repo.dirstate.update([f], 'n') | |
277 |
|
275 | |||
278 | # merge the tricky bits |
|
276 | # merge the tricky bits | |
279 | failedmerge = [] |
|
277 | failedmerge = [] | |
280 | files = merge.keys() |
|
278 | files = merge.keys() | |
281 | files.sort() |
|
279 | files.sort() | |
282 | for f in files: |
|
280 | for f in files: | |
283 | repo.ui.status(_("merging %s\n") % f) |
|
281 | repo.ui.status(_("merging %s\n") % f) | |
284 | my, other, flag = merge[f] |
|
282 | my, other, flag = merge[f] | |
285 | ret = merge3(repo, f, my, other, xp1, xp2) |
|
283 | ret = merge3(repo, f, my, other, xp1, xp2) | |
286 | if ret: |
|
284 | if ret: | |
287 | err = True |
|
285 | err = True | |
288 | failedmerge.append(f) |
|
286 | failedmerge.append(f) | |
289 | util.set_exec(repo.wjoin(f), flag) |
|
287 | util.set_exec(repo.wjoin(f), flag) | |
290 |
if |
|
288 | if not partial: | |
291 | if branchmerge: |
|
289 | if branchmerge: | |
292 | # We've done a branch merge, mark this file as merged |
|
290 | # We've done a branch merge, mark this file as merged | |
293 | # so that we properly record the merger later |
|
291 | # so that we properly record the merger later | |
294 | repo.dirstate.update([f], 'm') |
|
292 | repo.dirstate.update([f], 'm') | |
295 | else: |
|
293 | else: | |
296 | # We've update-merged a locally modified file, so |
|
294 | # We've update-merged a locally modified file, so | |
297 | # we set the dirstate to emulate a normal checkout |
|
295 | # we set the dirstate to emulate a normal checkout | |
298 | # of that file some time in the past. Thus our |
|
296 | # of that file some time in the past. Thus our | |
299 | # merge will appear as a normal local file |
|
297 | # merge will appear as a normal local file | |
300 | # modification. |
|
298 | # modification. | |
301 | f_len = len(repo.file(f).read(other)) |
|
299 | f_len = len(repo.file(f).read(other)) | |
302 | repo.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1) |
|
300 | repo.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1) | |
303 |
|
301 | |||
304 | remove.sort() |
|
302 | remove.sort() | |
305 | for f in remove: |
|
303 | for f in remove: | |
306 | repo.ui.note(_("removing %s\n") % f) |
|
304 | repo.ui.note(_("removing %s\n") % f) | |
307 | util.audit_path(f) |
|
305 | util.audit_path(f) | |
308 | try: |
|
306 | try: | |
309 | util.unlink(repo.wjoin(f)) |
|
307 | util.unlink(repo.wjoin(f)) | |
310 | except OSError, inst: |
|
308 | except OSError, inst: | |
311 | if inst.errno != errno.ENOENT: |
|
309 | if inst.errno != errno.ENOENT: | |
312 | repo.ui.warn(_("update failed to remove %s: %s!\n") % |
|
310 | repo.ui.warn(_("update failed to remove %s: %s!\n") % | |
313 | (f, inst.strerror)) |
|
311 | (f, inst.strerror)) | |
314 | if moddirstate: |
|
312 | if not partial: | |
315 | if branchmerge: |
|
313 | if branchmerge: | |
316 | repo.dirstate.update(remove, 'r') |
|
314 | repo.dirstate.update(remove, 'r') | |
317 | else: |
|
315 | else: | |
318 | repo.dirstate.forget(remove) |
|
316 | repo.dirstate.forget(remove) | |
319 |
|
317 | |||
320 | if moddirstate: |
|
318 | if not partial: | |
321 | repo.dirstate.setparents(p1, p2) |
|
319 | repo.dirstate.setparents(p1, p2) | |
322 |
|
320 | |||
323 | if show_stats: |
|
321 | if show_stats: | |
324 | stats = ((len(get), _("updated")), |
|
322 | stats = ((len(get), _("updated")), | |
325 | (len(merge) - len(failedmerge), _("merged")), |
|
323 | (len(merge) - len(failedmerge), _("merged")), | |
326 | (len(remove), _("removed")), |
|
324 | (len(remove), _("removed")), | |
327 | (len(failedmerge), _("unresolved"))) |
|
325 | (len(failedmerge), _("unresolved"))) | |
328 | note = ", ".join([_("%d files %s") % s for s in stats]) |
|
326 | note = ", ".join([_("%d files %s") % s for s in stats]) | |
329 | repo.ui.status("%s\n" % note) |
|
327 | repo.ui.status("%s\n" % note) | |
330 | if moddirstate: |
|
328 | if not partial: | |
331 | if branchmerge: |
|
329 | if branchmerge: | |
332 | if failedmerge: |
|
330 | if failedmerge: | |
333 | repo.ui.status(_("There are unresolved merges," |
|
331 | repo.ui.status(_("There are unresolved merges," | |
334 | " you can redo the full merge using:\n" |
|
332 | " you can redo the full merge using:\n" | |
335 | " hg update -C %s\n" |
|
333 | " hg update -C %s\n" | |
336 | " hg merge %s\n" |
|
334 | " hg merge %s\n" | |
337 | % (repo.changelog.rev(p1), |
|
335 | % (repo.changelog.rev(p1), | |
338 | repo.changelog.rev(p2)))) |
|
336 | repo.changelog.rev(p2)))) | |
339 | elif remind: |
|
337 | elif remind: | |
340 | repo.ui.status(_("(branch merge, don't forget to commit)\n")) |
|
338 | repo.ui.status(_("(branch merge, don't forget to commit)\n")) | |
341 | elif failedmerge: |
|
339 | elif failedmerge: | |
342 | repo.ui.status(_("There are unresolved merges with" |
|
340 | repo.ui.status(_("There are unresolved merges with" | |
343 | " locally modified files.\n")) |
|
341 | " locally modified files.\n")) | |
344 |
|
342 | |||
345 | repo.hook('update', parent1=xp1, parent2=xxp2, error=int(err)) |
|
343 | repo.hook('update', parent1=xp1, parent2=xxp2, error=int(err)) | |
346 | return err |
|
344 | return err | |
347 |
|
345 |
General Comments 0
You need to be logged in to leave comments.
Login now