@@ -1,300 +1,303 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 from i18n import _
 import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo
 import errno, lock, os, shutil, util, extensions
 import merge as _merge
 import verify as _verify
 
 def _local(path):
     return (os.path.isfile(util.drop_scheme('file', path)) and
             bundlerepo or localrepo)
 
 def parseurl(url, revs=[]):
     '''parse url#branch, returning url, branch + revs'''
 
     if '#' not in url:
         return url, (revs or None), None
 
     url, rev = url.split('#', 1)
     return url, revs + [rev], rev
 
 schemes = {
     'bundle': bundlerepo,
     'file': _local,
     'http': httprepo,
     'https': httprepo,
     'ssh': sshrepo,
     'static-http': statichttprepo,
     }
 
 def _lookup(path):
     scheme = 'file'
     if path:
         c = path.find(':')
         if c > 0:
             scheme = path[:c]
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         return thing
 
 def islocal(repo):
     '''return true if repo or path is local'''
     if isinstance(repo, str):
         try:
             return _lookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()
 
 def repository(ui, path='', create=False):
     """return a repository object for the specified path"""
     repo = _lookup(path).instance(ui, path, create)
     ui = getattr(repo, "ui", ui)
     for name, module in extensions.extensions():
         hook = getattr(module, 'reposetup', None)
         if hook:
             hook(ui, repo)
     return repo
 
 def defaultdest(source):
     '''return default destination of clone if none is given'''
     return os.path.basename(os.path.normpath(source))
 
 def localpath(path):
     if path.startswith('file://localhost/'):
         return path[16:]
     if path.startswith('file://'):
         return path[7:]
     if path.startswith('file:'):
         return path[5:]
     return path
 
 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
           stream=False):
     """Make a copy of an existing repository.
 
     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository objects, the source and
     newly created destination.
 
     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.
 
     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.
 
     Arguments:
 
     source: repository object or URL
 
     dest: URL of destination repository to create (defaults to base
     name of source repository)
 
     pull: always pull from source repository, even in local case
 
     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)
 
     rev: revision to clone up to (implies pull=True)
 
     update: update working directory after clone completes, if
-    destination is local repository
+    destination is local repository (True means update to default rev,
+    anything else is treated as a revision)
     """
 
     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, rev, checkout = parseurl(origsource, rev)
         src_repo = repository(ui, source)
     else:
         src_repo = source
         origsource = source = src_repo.url()
         checkout = None
 
     if dest is None:
         dest = defaultdest(source)
         ui.status(_("destination directory: %s\n") % dest)
 
     dest = localpath(dest)
     source = localpath(source)
 
     if os.path.exists(dest):
         raise util.Abort(_("destination '%s' already exists") % dest)
 
     class DirCleanup(object):
         def __init__(self, dir_):
             self.rmtree = shutil.rmtree
             self.dir_ = dir_
         def close(self):
             self.dir_ = None
         def __del__(self):
             if self.dir_:
                 self.rmtree(self.dir_, True)
 
     src_lock = dest_lock = dir_cleanup = None
     try:
         if islocal(dest):
             dir_cleanup = DirCleanup(dest)
 
         abspath = origsource
         copy = False
         if src_repo.cancopy() and islocal(dest):
             abspath = os.path.abspath(util.drop_scheme('file', origsource))
             copy = not pull and not rev
 
         if copy:
             try:
                 # we use a lock here because if we race with commit, we
                 # can end up with extra data in the cloned revlogs that's
                 # not pointed to by changesets, thus causing verify to
                 # fail
                 src_lock = src_repo.lock()
             except lock.LockException:
                 copy = False
 
         if copy:
             def force_copy(src, dst):
                 if not os.path.exists(src):
                     # Tolerate empty source repository and optional files
                     return
                 util.copyfiles(src, dst)
 
             src_store = os.path.realpath(src_repo.spath)
             if not os.path.exists(dest):
                 os.mkdir(dest)
             try:
                 dest_path = os.path.realpath(os.path.join(dest, ".hg"))
                 os.mkdir(dest_path)
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
                     dir_cleanup.close()
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise
             if src_repo.spath != src_repo.path:
                 # XXX racy
                 dummy_changelog = os.path.join(dest_path, "00changelog.i")
                 # copy the dummy changelog
                 force_copy(src_repo.join("00changelog.i"), dummy_changelog)
                 dest_store = os.path.join(dest_path, "store")
                 os.mkdir(dest_store)
             else:
                 dest_store = dest_path
             # copy the requires file
             force_copy(src_repo.join("requires"),
                        os.path.join(dest_path, "requires"))
             # we lock here to avoid premature writing to the target
             dest_lock = lock.lock(os.path.join(dest_store, "lock"))
 
             files = ("data",
                      "00manifest.d", "00manifest.i",
                      "00changelog.d", "00changelog.i")
             for f in files:
                 src = os.path.join(src_store, f)
                 dst = os.path.join(dest_store, f)
                 force_copy(src, dst)
 
             # we need to re-init the repo after manually copying the data
             # into it
             dest_repo = repository(ui, dest)
 
         else:
             try:
                 dest_repo = repository(ui, dest, create=True)
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
                     dir_cleanup.close()
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise
 
             revs = None
             if rev:
                 if 'lookup' not in src_repo.capabilities:
                     raise util.Abort(_("src repository does not support revision "
                                        "lookup and so doesn't support clone by "
                                        "revision"))
                 revs = [src_repo.lookup(r) for r in rev]
 
             if dest_repo.local():
                 dest_repo.clone(src_repo, heads=revs, stream=stream)
             elif src_repo.local():
                 src_repo.push(dest_repo, revs=revs)
             else:
                 raise util.Abort(_("clone from remote to remote not supported"))
 
         if dir_cleanup:
             dir_cleanup.close()
 
         if dest_repo.local():
             fp = dest_repo.opener("hgrc", "w", text=True)
             fp.write("[paths]\n")
             fp.write("default = %s\n" % abspath)
             fp.close()
 
             if update:
                 dest_repo.ui.status(_("updating working directory\n"))
-                if not checkout:
+                if update is not True:
+                    checkout = update
+                elif not checkout:
                     try:
                         checkout = dest_repo.lookup("default")
                     except:
                         checkout = dest_repo.changelog.tip()
                 _update(dest_repo, checkout)
 
         return src_repo, dest_repo
     finally:
         del src_lock, dest_lock, dir_cleanup
 
 def _showstats(repo, stats):
     stats = ((stats[0], _("updated")),
              (stats[1], _("merged")),
              (stats[2], _("removed")),
              (stats[3], _("unresolved")))
     note = ", ".join([_("%d files %s") % s for s in stats])
     repo.ui.status("%s\n" % note)
 
 def _update(repo, node): return update(repo, node)
 
 def update(repo, node):
     """update the working directory to node, merging linear changes"""
     pl = repo.parents()
     stats = _merge.update(repo, node, False, False, None)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
     return stats[3] > 0
 
 def clean(repo, node, show_stats=True):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = _merge.update(repo, node, False, True, None)
     if show_stats: _showstats(repo, stats)
     return stats[3] > 0
 
 def merge(repo, node, force=None, remind=True):
     """branch merge with node, resolving changes"""
     stats = _merge.update(repo, node, True, force, False)
     _showstats(repo, stats)
     if stats[3]:
         pl = repo.parents()
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3] > 0
 
 def revert(repo, node, choose):
     """revert changes to revision in node without updating dirstate"""
     return _merge.update(repo, node, False, True, choose)[3] > 0
 
 def verify(repo):
     """verify the consistency of a repository"""
     return _verify.verify(repo)
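
The two edits in this hunk change clone's update semantics: `update` may now carry a revision identifier instead of a plain boolean, and the `#branch` fragment split off by `parseurl` becomes the default checkout. A minimal sketch of the new call-site behavior; the URLs, revision names, and `myui` setup below are illustrative only, not part of this changeset:

from mercurial import ui, hg

myui = ui.ui()

# parseurl splits off a '#branch' fragment and returns it as the
# default checkout ('stable' here) alongside the bare URL and revs
url, revs, checkout = hg.parseurl('http://example.com/repo#stable', [])

# update=True keeps the old behavior: check out the 'default' head.
# Any other truthy value now names the revision to check out, since
# clone() runs: if update is not True: checkout = update
src_repo, dest_repo = hg.clone(myui, 'http://example.com/repo',
                               'repo-copy', update='1.0')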
@@ -1,2138 +1,2141 @@
1 | # localrepo.py - read/write repository class for mercurial |
|
1 | # localrepo.py - read/write repository class for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from node import bin, hex, nullid, nullrev, short |
|
8 | from node import bin, hex, nullid, nullrev, short | |
9 | from i18n import _ |
|
9 | from i18n import _ | |
10 | import repo, changegroup |
|
10 | import repo, changegroup | |
11 | import changelog, dirstate, filelog, manifest, context, weakref |
|
11 | import changelog, dirstate, filelog, manifest, context, weakref | |
12 | import lock, transaction, stat, errno, ui |
|
12 | import lock, transaction, stat, errno, ui | |
13 | import os, revlog, time, util, extensions, hook, inspect |
|
13 | import os, revlog, time, util, extensions, hook, inspect | |
14 |
|
14 | |||
15 | class localrepository(repo.repository): |
|
15 | class localrepository(repo.repository): | |
16 | capabilities = util.set(('lookup', 'changegroupsubset')) |
|
16 | capabilities = util.set(('lookup', 'changegroupsubset')) | |
17 | supported = ('revlogv1', 'store') |
|
17 | supported = ('revlogv1', 'store') | |
18 |
|
18 | |||
19 | def __init__(self, parentui, path=None, create=0): |
|
19 | def __init__(self, parentui, path=None, create=0): | |
20 | repo.repository.__init__(self) |
|
20 | repo.repository.__init__(self) | |
21 | self.root = os.path.realpath(path) |
|
21 | self.root = os.path.realpath(path) | |
22 | self.path = os.path.join(self.root, ".hg") |
|
22 | self.path = os.path.join(self.root, ".hg") | |
23 | self.origroot = path |
|
23 | self.origroot = path | |
24 | self.opener = util.opener(self.path) |
|
24 | self.opener = util.opener(self.path) | |
25 | self.wopener = util.opener(self.root) |
|
25 | self.wopener = util.opener(self.root) | |
26 |
|
26 | |||
27 | if not os.path.isdir(self.path): |
|
27 | if not os.path.isdir(self.path): | |
28 | if create: |
|
28 | if create: | |
29 | if not os.path.exists(path): |
|
29 | if not os.path.exists(path): | |
30 | os.mkdir(path) |
|
30 | os.mkdir(path) | |
31 | os.mkdir(self.path) |
|
31 | os.mkdir(self.path) | |
32 | requirements = ["revlogv1"] |
|
32 | requirements = ["revlogv1"] | |
33 | if parentui.configbool('format', 'usestore', True): |
|
33 | if parentui.configbool('format', 'usestore', True): | |
34 | os.mkdir(os.path.join(self.path, "store")) |
|
34 | os.mkdir(os.path.join(self.path, "store")) | |
35 | requirements.append("store") |
|
35 | requirements.append("store") | |
36 | # create an invalid changelog |
|
36 | # create an invalid changelog | |
37 | self.opener("00changelog.i", "a").write( |
|
37 | self.opener("00changelog.i", "a").write( | |
38 | '\0\0\0\2' # represents revlogv2 |
|
38 | '\0\0\0\2' # represents revlogv2 | |
39 | ' dummy changelog to prevent using the old repo layout' |
|
39 | ' dummy changelog to prevent using the old repo layout' | |
40 | ) |
|
40 | ) | |
41 | reqfile = self.opener("requires", "w") |
|
41 | reqfile = self.opener("requires", "w") | |
42 | for r in requirements: |
|
42 | for r in requirements: | |
43 | reqfile.write("%s\n" % r) |
|
43 | reqfile.write("%s\n" % r) | |
44 | reqfile.close() |
|
44 | reqfile.close() | |
45 | else: |
|
45 | else: | |
46 | raise repo.RepoError(_("repository %s not found") % path) |
|
46 | raise repo.RepoError(_("repository %s not found") % path) | |
47 | elif create: |
|
47 | elif create: | |
48 | raise repo.RepoError(_("repository %s already exists") % path) |
|
48 | raise repo.RepoError(_("repository %s already exists") % path) | |
49 | else: |
|
49 | else: | |
50 | # find requirements |
|
50 | # find requirements | |
51 | try: |
|
51 | try: | |
52 | requirements = self.opener("requires").read().splitlines() |
|
52 | requirements = self.opener("requires").read().splitlines() | |
53 | except IOError, inst: |
|
53 | except IOError, inst: | |
54 | if inst.errno != errno.ENOENT: |
|
54 | if inst.errno != errno.ENOENT: | |
55 | raise |
|
55 | raise | |
56 | requirements = [] |
|
56 | requirements = [] | |
57 | # check them |
|
57 | # check them | |
58 | for r in requirements: |
|
58 | for r in requirements: | |
59 | if r not in self.supported: |
|
59 | if r not in self.supported: | |
60 | raise repo.RepoError(_("requirement '%s' not supported") % r) |
|
60 | raise repo.RepoError(_("requirement '%s' not supported") % r) | |
61 |
|
61 | |||
62 | # setup store |
|
62 | # setup store | |
63 | if "store" in requirements: |
|
63 | if "store" in requirements: | |
64 | self.encodefn = util.encodefilename |
|
64 | self.encodefn = util.encodefilename | |
65 | self.decodefn = util.decodefilename |
|
65 | self.decodefn = util.decodefilename | |
66 | self.spath = os.path.join(self.path, "store") |
|
66 | self.spath = os.path.join(self.path, "store") | |
67 | else: |
|
67 | else: | |
68 | self.encodefn = lambda x: x |
|
68 | self.encodefn = lambda x: x | |
69 | self.decodefn = lambda x: x |
|
69 | self.decodefn = lambda x: x | |
70 | self.spath = self.path |
|
70 | self.spath = self.path | |
71 |
|
71 | |||
72 | try: |
|
72 | try: | |
73 | # files in .hg/ will be created using this mode |
|
73 | # files in .hg/ will be created using this mode | |
74 | mode = os.stat(self.spath).st_mode |
|
74 | mode = os.stat(self.spath).st_mode | |
75 | # avoid some useless chmods |
|
75 | # avoid some useless chmods | |
76 | if (0777 & ~util._umask) == (0777 & mode): |
|
76 | if (0777 & ~util._umask) == (0777 & mode): | |
77 | mode = None |
|
77 | mode = None | |
78 | except OSError: |
|
78 | except OSError: | |
79 | mode = None |
|
79 | mode = None | |
80 |
|
80 | |||
81 | self._createmode = mode |
|
81 | self._createmode = mode | |
82 | self.opener.createmode = mode |
|
82 | self.opener.createmode = mode | |
83 | sopener = util.opener(self.spath) |
|
83 | sopener = util.opener(self.spath) | |
84 | sopener.createmode = mode |
|
84 | sopener.createmode = mode | |
85 | self.sopener = util.encodedopener(sopener, self.encodefn) |
|
85 | self.sopener = util.encodedopener(sopener, self.encodefn) | |
86 |
|
86 | |||
87 | self.ui = ui.ui(parentui=parentui) |
|
87 | self.ui = ui.ui(parentui=parentui) | |
88 | try: |
|
88 | try: | |
89 | self.ui.readconfig(self.join("hgrc"), self.root) |
|
89 | self.ui.readconfig(self.join("hgrc"), self.root) | |
90 | extensions.loadall(self.ui) |
|
90 | extensions.loadall(self.ui) | |
91 | except IOError: |
|
91 | except IOError: | |
92 | pass |
|
92 | pass | |
93 |
|
93 | |||
94 | self.tagscache = None |
|
94 | self.tagscache = None | |
95 | self._tagstypecache = None |
|
95 | self._tagstypecache = None | |
96 | self.branchcache = None |
|
96 | self.branchcache = None | |
97 | self._ubranchcache = None # UTF-8 version of branchcache |
|
97 | self._ubranchcache = None # UTF-8 version of branchcache | |
98 | self._branchcachetip = None |
|
98 | self._branchcachetip = None | |
99 | self.nodetagscache = None |
|
99 | self.nodetagscache = None | |
100 | self.filterpats = {} |
|
100 | self.filterpats = {} | |
101 | self._datafilters = {} |
|
101 | self._datafilters = {} | |
102 | self._transref = self._lockref = self._wlockref = None |
|
102 | self._transref = self._lockref = self._wlockref = None | |
103 |
|
103 | |||
104 | def __getattr__(self, name): |
|
104 | def __getattr__(self, name): | |
105 | if name == 'changelog': |
|
105 | if name == 'changelog': | |
106 | self.changelog = changelog.changelog(self.sopener) |
|
106 | self.changelog = changelog.changelog(self.sopener) | |
107 | self.sopener.defversion = self.changelog.version |
|
107 | self.sopener.defversion = self.changelog.version | |
108 | return self.changelog |
|
108 | return self.changelog | |
109 | if name == 'manifest': |
|
109 | if name == 'manifest': | |
110 | self.changelog |
|
110 | self.changelog | |
111 | self.manifest = manifest.manifest(self.sopener) |
|
111 | self.manifest = manifest.manifest(self.sopener) | |
112 | return self.manifest |
|
112 | return self.manifest | |
113 | if name == 'dirstate': |
|
113 | if name == 'dirstate': | |
114 | self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root) |
|
114 | self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root) | |
115 | return self.dirstate |
|
115 | return self.dirstate | |
116 | else: |
|
116 | else: | |
117 | raise AttributeError, name |
|
117 | raise AttributeError, name | |
118 |
|
118 | |||
119 | def url(self): |
|
119 | def url(self): | |
120 | return 'file:' + self.root |
|
120 | return 'file:' + self.root | |
121 |
|
121 | |||
122 | def hook(self, name, throw=False, **args): |
|
122 | def hook(self, name, throw=False, **args): | |
123 | return hook.hook(self.ui, self, name, throw, **args) |
|
123 | return hook.hook(self.ui, self, name, throw, **args) | |
124 |
|
124 | |||
125 | tag_disallowed = ':\r\n' |
|
125 | tag_disallowed = ':\r\n' | |
126 |
|
126 | |||
127 | def _tag(self, names, node, message, local, user, date, parent=None, |
|
127 | def _tag(self, names, node, message, local, user, date, parent=None, | |
128 | extra={}): |
|
128 | extra={}): | |
129 | use_dirstate = parent is None |
|
129 | use_dirstate = parent is None | |
130 |
|
130 | |||
131 | if isinstance(names, str): |
|
131 | if isinstance(names, str): | |
132 | allchars = names |
|
132 | allchars = names | |
133 | names = (names,) |
|
133 | names = (names,) | |
134 | else: |
|
134 | else: | |
135 | allchars = ''.join(names) |
|
135 | allchars = ''.join(names) | |
136 | for c in self.tag_disallowed: |
|
136 | for c in self.tag_disallowed: | |
137 | if c in allchars: |
|
137 | if c in allchars: | |
138 | raise util.Abort(_('%r cannot be used in a tag name') % c) |
|
138 | raise util.Abort(_('%r cannot be used in a tag name') % c) | |
139 |
|
139 | |||
140 | for name in names: |
|
140 | for name in names: | |
141 | self.hook('pretag', throw=True, node=hex(node), tag=name, |
|
141 | self.hook('pretag', throw=True, node=hex(node), tag=name, | |
142 | local=local) |
|
142 | local=local) | |
143 |
|
143 | |||
144 | def writetags(fp, names, munge, prevtags): |
|
144 | def writetags(fp, names, munge, prevtags): | |
145 | fp.seek(0, 2) |
|
145 | fp.seek(0, 2) | |
146 | if prevtags and prevtags[-1] != '\n': |
|
146 | if prevtags and prevtags[-1] != '\n': | |
147 | fp.write('\n') |
|
147 | fp.write('\n') | |
148 | for name in names: |
|
148 | for name in names: | |
149 | fp.write('%s %s\n' % (hex(node), munge and munge(name) or name)) |
|
149 | fp.write('%s %s\n' % (hex(node), munge and munge(name) or name)) | |
150 | fp.close() |
|
150 | fp.close() | |
151 |
|
151 | |||
152 | prevtags = '' |
|
152 | prevtags = '' | |
153 | if local: |
|
153 | if local: | |
154 | try: |
|
154 | try: | |
155 | fp = self.opener('localtags', 'r+') |
|
155 | fp = self.opener('localtags', 'r+') | |
156 | except IOError, err: |
|
156 | except IOError, err: | |
157 | fp = self.opener('localtags', 'a') |
|
157 | fp = self.opener('localtags', 'a') | |
158 | else: |
|
158 | else: | |
159 | prevtags = fp.read() |
|
159 | prevtags = fp.read() | |
160 |
|
160 | |||
161 | # local tags are stored in the current charset |
|
161 | # local tags are stored in the current charset | |
162 | writetags(fp, names, None, prevtags) |
|
162 | writetags(fp, names, None, prevtags) | |
163 | for name in names: |
|
163 | for name in names: | |
164 | self.hook('tag', node=hex(node), tag=name, local=local) |
|
164 | self.hook('tag', node=hex(node), tag=name, local=local) | |
165 | return |
|
165 | return | |
166 |
|
166 | |||
167 | if use_dirstate: |
|
167 | if use_dirstate: | |
168 | try: |
|
168 | try: | |
169 | fp = self.wfile('.hgtags', 'rb+') |
|
169 | fp = self.wfile('.hgtags', 'rb+') | |
170 | except IOError, err: |
|
170 | except IOError, err: | |
171 | fp = self.wfile('.hgtags', 'ab') |
|
171 | fp = self.wfile('.hgtags', 'ab') | |
172 | else: |
|
172 | else: | |
173 | prevtags = fp.read() |
|
173 | prevtags = fp.read() | |
174 | else: |
|
174 | else: | |
175 | try: |
|
175 | try: | |
176 | prevtags = self.filectx('.hgtags', parent).data() |
|
176 | prevtags = self.filectx('.hgtags', parent).data() | |
177 | except revlog.LookupError: |
|
177 | except revlog.LookupError: | |
178 | pass |
|
178 | pass | |
179 | fp = self.wfile('.hgtags', 'wb') |
|
179 | fp = self.wfile('.hgtags', 'wb') | |
180 | if prevtags: |
|
180 | if prevtags: | |
181 | fp.write(prevtags) |
|
181 | fp.write(prevtags) | |
182 |
|
182 | |||
183 | # committed tags are stored in UTF-8 |
|
183 | # committed tags are stored in UTF-8 | |
184 | writetags(fp, names, util.fromlocal, prevtags) |
|
184 | writetags(fp, names, util.fromlocal, prevtags) | |
185 |
|
185 | |||
186 | if use_dirstate and '.hgtags' not in self.dirstate: |
|
186 | if use_dirstate and '.hgtags' not in self.dirstate: | |
187 | self.add(['.hgtags']) |
|
187 | self.add(['.hgtags']) | |
188 |
|
188 | |||
189 | tagnode = self.commit(['.hgtags'], message, user, date, p1=parent, |
|
189 | tagnode = self.commit(['.hgtags'], message, user, date, p1=parent, | |
190 | extra=extra) |
|
190 | extra=extra) | |
191 |
|
191 | |||
192 | for name in names: |
|
192 | for name in names: | |
193 | self.hook('tag', node=hex(node), tag=name, local=local) |
|
193 | self.hook('tag', node=hex(node), tag=name, local=local) | |
194 |
|
194 | |||
195 | return tagnode |
|
195 | return tagnode | |
196 |
|
196 | |||
197 | def tag(self, names, node, message, local, user, date): |
|
197 | def tag(self, names, node, message, local, user, date): | |
198 | '''tag a revision with one or more symbolic names. |
|
198 | '''tag a revision with one or more symbolic names. | |
199 |
|
199 | |||
200 | names is a list of strings or, when adding a single tag, names may be a |
|
200 | names is a list of strings or, when adding a single tag, names may be a | |
201 | string. |
|
201 | string. | |
202 |
|
202 | |||
203 | if local is True, the tags are stored in a per-repository file. |
|
203 | if local is True, the tags are stored in a per-repository file. | |
204 | otherwise, they are stored in the .hgtags file, and a new |
|
204 | otherwise, they are stored in the .hgtags file, and a new | |
205 | changeset is committed with the change. |
|
205 | changeset is committed with the change. | |
206 |
|
206 | |||
207 | keyword arguments: |
|
207 | keyword arguments: | |
208 |
|
208 | |||
209 | local: whether to store tags in non-version-controlled file |
|
209 | local: whether to store tags in non-version-controlled file | |
210 | (default False) |
|
210 | (default False) | |
211 |
|
211 | |||
212 | message: commit message to use if committing |
|
212 | message: commit message to use if committing | |
213 |
|
213 | |||
214 | user: name of user to use if committing |
|
214 | user: name of user to use if committing | |
215 |
|
215 | |||
216 | date: date tuple to use if committing''' |
|
216 | date: date tuple to use if committing''' | |
217 |
|
217 | |||
218 | for x in self.status()[:5]: |
|
218 | for x in self.status()[:5]: | |
219 | if '.hgtags' in x: |
|
219 | if '.hgtags' in x: | |
220 | raise util.Abort(_('working copy of .hgtags is changed ' |
|
220 | raise util.Abort(_('working copy of .hgtags is changed ' | |
221 | '(please commit .hgtags manually)')) |
|
221 | '(please commit .hgtags manually)')) | |
222 |
|
222 | |||
223 | self._tag(names, node, message, local, user, date) |
|
223 | self._tag(names, node, message, local, user, date) | |
224 |
|
224 | |||
225 | def tags(self): |
|
225 | def tags(self): | |
226 | '''return a mapping of tag to node''' |
|
226 | '''return a mapping of tag to node''' | |
227 | if self.tagscache: |
|
227 | if self.tagscache: | |
228 | return self.tagscache |
|
228 | return self.tagscache | |
229 |
|
229 | |||
230 | globaltags = {} |
|
230 | globaltags = {} | |
231 | tagtypes = {} |
|
231 | tagtypes = {} | |
232 |
|
232 | |||
233 | def readtags(lines, fn, tagtype): |
|
233 | def readtags(lines, fn, tagtype): | |
234 | filetags = {} |
|
234 | filetags = {} | |
235 | count = 0 |
|
235 | count = 0 | |
236 |
|
236 | |||
237 | def warn(msg): |
|
237 | def warn(msg): | |
238 | self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg)) |
|
238 | self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg)) | |
239 |
|
239 | |||
240 | for l in lines: |
|
240 | for l in lines: | |
241 | count += 1 |
|
241 | count += 1 | |
242 | if not l: |
|
242 | if not l: | |
243 | continue |
|
243 | continue | |
244 | s = l.split(" ", 1) |
|
244 | s = l.split(" ", 1) | |
245 | if len(s) != 2: |
|
245 | if len(s) != 2: | |
246 | warn(_("cannot parse entry")) |
|
246 | warn(_("cannot parse entry")) | |
247 | continue |
|
247 | continue | |
248 | node, key = s |
|
248 | node, key = s | |
249 | key = util.tolocal(key.strip()) # stored in UTF-8 |
|
249 | key = util.tolocal(key.strip()) # stored in UTF-8 | |
250 | try: |
|
250 | try: | |
251 | bin_n = bin(node) |
|
251 | bin_n = bin(node) | |
252 | except TypeError: |
|
252 | except TypeError: | |
253 | warn(_("node '%s' is not well formed") % node) |
|
253 | warn(_("node '%s' is not well formed") % node) | |
254 | continue |
|
254 | continue | |
255 | if bin_n not in self.changelog.nodemap: |
|
255 | if bin_n not in self.changelog.nodemap: | |
256 | warn(_("tag '%s' refers to unknown node") % key) |
|
256 | warn(_("tag '%s' refers to unknown node") % key) | |
257 | continue |
|
257 | continue | |
258 |
|
258 | |||
259 | h = [] |
|
259 | h = [] | |
260 | if key in filetags: |
|
260 | if key in filetags: | |
261 | n, h = filetags[key] |
|
261 | n, h = filetags[key] | |
262 | h.append(n) |
|
262 | h.append(n) | |
263 | filetags[key] = (bin_n, h) |
|
263 | filetags[key] = (bin_n, h) | |
264 |
|
264 | |||
265 | for k, nh in filetags.items(): |
|
265 | for k, nh in filetags.items(): | |
266 | if k not in globaltags: |
|
266 | if k not in globaltags: | |
267 | globaltags[k] = nh |
|
267 | globaltags[k] = nh | |
268 | tagtypes[k] = tagtype |
|
268 | tagtypes[k] = tagtype | |
269 | continue |
|
269 | continue | |
270 |
|
270 | |||
271 | # we prefer the global tag if: |
|
271 | # we prefer the global tag if: | |
272 | # it supercedes us OR |
|
272 | # it supercedes us OR | |
273 | # mutual supercedes and it has a higher rank |
|
273 | # mutual supercedes and it has a higher rank | |
274 | # otherwise we win because we're tip-most |
|
274 | # otherwise we win because we're tip-most | |
275 | an, ah = nh |
|
275 | an, ah = nh | |
276 | bn, bh = globaltags[k] |
|
276 | bn, bh = globaltags[k] | |
277 | if (bn != an and an in bh and |
|
277 | if (bn != an and an in bh and | |
278 | (bn not in ah or len(bh) > len(ah))): |
|
278 | (bn not in ah or len(bh) > len(ah))): | |
279 | an = bn |
|
279 | an = bn | |
280 | ah.extend([n for n in bh if n not in ah]) |
|
280 | ah.extend([n for n in bh if n not in ah]) | |
281 | globaltags[k] = an, ah |
|
281 | globaltags[k] = an, ah | |
282 | tagtypes[k] = tagtype |
|
282 | tagtypes[k] = tagtype | |
283 |
|
283 | |||
284 | # read the tags file from each head, ending with the tip |
|
284 | # read the tags file from each head, ending with the tip | |
285 | f = None |
|
285 | f = None | |
286 | for rev, node, fnode in self._hgtagsnodes(): |
|
286 | for rev, node, fnode in self._hgtagsnodes(): | |
287 | f = (f and f.filectx(fnode) or |
|
287 | f = (f and f.filectx(fnode) or | |
288 | self.filectx('.hgtags', fileid=fnode)) |
|
288 | self.filectx('.hgtags', fileid=fnode)) | |
289 | readtags(f.data().splitlines(), f, "global") |
|
289 | readtags(f.data().splitlines(), f, "global") | |
290 |
|
290 | |||
291 | try: |
|
291 | try: | |
292 | data = util.fromlocal(self.opener("localtags").read()) |
|
292 | data = util.fromlocal(self.opener("localtags").read()) | |
293 | # localtags are stored in the local character set |
|
293 | # localtags are stored in the local character set | |
294 | # while the internal tag table is stored in UTF-8 |
|
294 | # while the internal tag table is stored in UTF-8 | |
295 | readtags(data.splitlines(), "localtags", "local") |
|
295 | readtags(data.splitlines(), "localtags", "local") | |
296 | except IOError: |
|
296 | except IOError: | |
297 | pass |
|
297 | pass | |
298 |
|
298 | |||
299 | self.tagscache = {} |
|
299 | self.tagscache = {} | |
300 | self._tagstypecache = {} |
|
300 | self._tagstypecache = {} | |
301 | for k,nh in globaltags.items(): |
|
301 | for k,nh in globaltags.items(): | |
302 | n = nh[0] |
|
302 | n = nh[0] | |
303 | if n != nullid: |
|
303 | if n != nullid: | |
304 | self.tagscache[k] = n |
|
304 | self.tagscache[k] = n | |
305 | self._tagstypecache[k] = tagtypes[k] |
|
305 | self._tagstypecache[k] = tagtypes[k] | |
306 | self.tagscache['tip'] = self.changelog.tip() |
|
306 | self.tagscache['tip'] = self.changelog.tip() | |
307 |
|
307 | |||
308 | return self.tagscache |
|
308 | return self.tagscache | |
309 |
|
309 | |||
310 | def tagtype(self, tagname): |
|
310 | def tagtype(self, tagname): | |
311 | ''' |
|
311 | ''' | |
312 | return the type of the given tag. result can be: |
|
312 | return the type of the given tag. result can be: | |
313 |
|
313 | |||
314 | 'local' : a local tag |
|
314 | 'local' : a local tag | |
315 | 'global' : a global tag |
|
315 | 'global' : a global tag | |
316 | None : tag does not exist |
|
316 | None : tag does not exist | |
317 | ''' |
|
317 | ''' | |
318 |
|
318 | |||
319 | self.tags() |
|
319 | self.tags() | |
320 |
|
320 | |||
321 | return self._tagstypecache.get(tagname) |
|
321 | return self._tagstypecache.get(tagname) | |
322 |
|
322 | |||
323 | def _hgtagsnodes(self): |
|
323 | def _hgtagsnodes(self): | |
324 | heads = self.heads() |
|
324 | heads = self.heads() | |
325 | heads.reverse() |
|
325 | heads.reverse() | |
326 | last = {} |
|
326 | last = {} | |
327 | ret = [] |
|
327 | ret = [] | |
328 | for node in heads: |
|
328 | for node in heads: | |
329 | c = self.changectx(node) |
|
329 | c = self.changectx(node) | |
330 | rev = c.rev() |
|
330 | rev = c.rev() | |
331 | try: |
|
331 | try: | |
332 | fnode = c.filenode('.hgtags') |
|
332 | fnode = c.filenode('.hgtags') | |
333 | except revlog.LookupError: |
|
333 | except revlog.LookupError: | |
334 | continue |
|
334 | continue | |
335 | ret.append((rev, node, fnode)) |
|
335 | ret.append((rev, node, fnode)) | |
336 | if fnode in last: |
|
336 | if fnode in last: | |
337 | ret[last[fnode]] = None |
|
337 | ret[last[fnode]] = None | |
338 | last[fnode] = len(ret) - 1 |
|
338 | last[fnode] = len(ret) - 1 | |
339 | return [item for item in ret if item] |
|
339 | return [item for item in ret if item] | |
340 |
|
340 | |||
341 | def tagslist(self): |
|
341 | def tagslist(self): | |
342 | '''return a list of tags ordered by revision''' |
|
342 | '''return a list of tags ordered by revision''' | |
343 | l = [] |
|
343 | l = [] | |
344 | for t, n in self.tags().items(): |
|
344 | for t, n in self.tags().items(): | |
345 | try: |
|
345 | try: | |
346 | r = self.changelog.rev(n) |
|
346 | r = self.changelog.rev(n) | |
347 | except: |
|
347 | except: | |
348 | r = -2 # sort to the beginning of the list if unknown |
|
348 | r = -2 # sort to the beginning of the list if unknown | |
349 | l.append((r, t, n)) |
|
349 | l.append((r, t, n)) | |
350 | l.sort() |
|
350 | l.sort() | |
351 | return [(t, n) for r, t, n in l] |
|
351 | return [(t, n) for r, t, n in l] | |
352 |
|
352 | |||
353 | def nodetags(self, node): |
|
353 | def nodetags(self, node): | |
354 | '''return the tags associated with a node''' |
|
354 | '''return the tags associated with a node''' | |
355 | if not self.nodetagscache: |
|
355 | if not self.nodetagscache: | |
356 | self.nodetagscache = {} |
|
356 | self.nodetagscache = {} | |
357 | for t, n in self.tags().items(): |
|
357 | for t, n in self.tags().items(): | |
358 | self.nodetagscache.setdefault(n, []).append(t) |
|
358 | self.nodetagscache.setdefault(n, []).append(t) | |
359 | return self.nodetagscache.get(node, []) |
|
359 | return self.nodetagscache.get(node, []) | |
360 |
|
360 | |||
361 | def _branchtags(self, partial, lrev): |
|
361 | def _branchtags(self, partial, lrev): | |
362 | tiprev = self.changelog.count() - 1 |
|
362 | tiprev = self.changelog.count() - 1 | |
363 | if lrev != tiprev: |
|
363 | if lrev != tiprev: | |
364 | self._updatebranchcache(partial, lrev+1, tiprev+1) |
|
364 | self._updatebranchcache(partial, lrev+1, tiprev+1) | |
365 | self._writebranchcache(partial, self.changelog.tip(), tiprev) |
|
365 | self._writebranchcache(partial, self.changelog.tip(), tiprev) | |
366 |
|
366 | |||
367 | return partial |
|
367 | return partial | |
368 |
|
368 | |||
369 | def branchtags(self): |
|
369 | def branchtags(self): | |
370 | tip = self.changelog.tip() |
|
370 | tip = self.changelog.tip() | |
371 | if self.branchcache is not None and self._branchcachetip == tip: |
|
371 | if self.branchcache is not None and self._branchcachetip == tip: | |
372 | return self.branchcache |
|
372 | return self.branchcache | |
373 |
|
373 | |||
374 | oldtip = self._branchcachetip |
|
374 | oldtip = self._branchcachetip | |
375 | self._branchcachetip = tip |
|
375 | self._branchcachetip = tip | |
376 | if self.branchcache is None: |
|
376 | if self.branchcache is None: | |
377 | self.branchcache = {} # avoid recursion in changectx |
|
377 | self.branchcache = {} # avoid recursion in changectx | |
378 | else: |
|
378 | else: | |
379 | self.branchcache.clear() # keep using the same dict |
|
379 | self.branchcache.clear() # keep using the same dict | |
380 | if oldtip is None or oldtip not in self.changelog.nodemap: |
|
380 | if oldtip is None or oldtip not in self.changelog.nodemap: | |
381 | partial, last, lrev = self._readbranchcache() |
|
381 | partial, last, lrev = self._readbranchcache() | |
382 | else: |
|
382 | else: | |
383 | lrev = self.changelog.rev(oldtip) |
|
383 | lrev = self.changelog.rev(oldtip) | |
384 | partial = self._ubranchcache |
|
384 | partial = self._ubranchcache | |
385 |
|
385 | |||
386 | self._branchtags(partial, lrev) |
|
386 | self._branchtags(partial, lrev) | |
387 |
|
387 | |||
388 | # the branch cache is stored on disk as UTF-8, but in the local |
|
388 | # the branch cache is stored on disk as UTF-8, but in the local | |
389 | # charset internally |
|
389 | # charset internally | |
390 | for k, v in partial.items(): |
|
390 | for k, v in partial.items(): | |
391 | self.branchcache[util.tolocal(k)] = v |
|
391 | self.branchcache[util.tolocal(k)] = v | |
392 | self._ubranchcache = partial |
|
392 | self._ubranchcache = partial | |
393 | return self.branchcache |
|
393 | return self.branchcache | |
394 |
|
394 | |||
395 | def _readbranchcache(self): |
|
395 | def _readbranchcache(self): | |
396 | partial = {} |
|
396 | partial = {} | |
397 | try: |
|
397 | try: | |
398 | f = self.opener("branch.cache") |
|
398 | f = self.opener("branch.cache") | |
399 | lines = f.read().split('\n') |
|
399 | lines = f.read().split('\n') | |
400 | f.close() |
|
400 | f.close() | |
401 | except (IOError, OSError): |
|
401 | except (IOError, OSError): | |
402 | return {}, nullid, nullrev |
|
402 | return {}, nullid, nullrev | |
403 |
|
403 | |||
404 | try: |
|
404 | try: | |
405 | last, lrev = lines.pop(0).split(" ", 1) |
|
405 | last, lrev = lines.pop(0).split(" ", 1) | |
406 | last, lrev = bin(last), int(lrev) |
|
406 | last, lrev = bin(last), int(lrev) | |
407 | if not (lrev < self.changelog.count() and |
|
407 | if not (lrev < self.changelog.count() and | |
408 | self.changelog.node(lrev) == last): # sanity check |
|
408 | self.changelog.node(lrev) == last): # sanity check | |
409 | # invalidate the cache |
|
409 | # invalidate the cache | |
410 | raise ValueError('invalidating branch cache (tip differs)') |
|
410 | raise ValueError('invalidating branch cache (tip differs)') | |
411 | for l in lines: |
|
411 | for l in lines: | |
412 | if not l: continue |
|
412 | if not l: continue | |
413 | node, label = l.split(" ", 1) |
|
413 | node, label = l.split(" ", 1) | |
414 | partial[label.strip()] = bin(node) |
|
414 | partial[label.strip()] = bin(node) | |
415 | except (KeyboardInterrupt, util.SignalInterrupt): |
|
415 | except (KeyboardInterrupt, util.SignalInterrupt): | |
416 | raise |
|
416 | raise | |
417 | except Exception, inst: |
|
417 | except Exception, inst: | |
418 | if self.ui.debugflag: |
|
418 | if self.ui.debugflag: | |
419 | self.ui.warn(str(inst), '\n') |
|
419 | self.ui.warn(str(inst), '\n') | |
420 | partial, last, lrev = {}, nullid, nullrev |
|
420 | partial, last, lrev = {}, nullid, nullrev | |
421 | return partial, last, lrev |
|
421 | return partial, last, lrev | |
422 |
|
422 | |||
423 | def _writebranchcache(self, branches, tip, tiprev): |
|
423 | def _writebranchcache(self, branches, tip, tiprev): | |
424 | try: |
|
424 | try: | |
425 | f = self.opener("branch.cache", "w", atomictemp=True) |
|
425 | f = self.opener("branch.cache", "w", atomictemp=True) | |
426 | f.write("%s %s\n" % (hex(tip), tiprev)) |
|
426 | f.write("%s %s\n" % (hex(tip), tiprev)) | |
427 | for label, node in branches.iteritems(): |
|
427 | for label, node in branches.iteritems(): | |
428 | f.write("%s %s\n" % (hex(node), label)) |
|
428 | f.write("%s %s\n" % (hex(node), label)) | |
429 | f.rename() |
|
429 | f.rename() | |
430 | except (IOError, OSError): |
|
430 | except (IOError, OSError): | |
431 | pass |
|
431 | pass | |
432 |
|
432 | |||
433 | def _updatebranchcache(self, partial, start, end): |
|
433 | def _updatebranchcache(self, partial, start, end): | |
434 | for r in xrange(start, end): |
|
434 | for r in xrange(start, end): | |
435 | c = self.changectx(r) |
|
435 | c = self.changectx(r) | |
436 | b = c.branch() |
|
436 | b = c.branch() | |
437 | partial[b] = c.node() |
|
437 | partial[b] = c.node() | |
438 |
|
438 | |||
439 | def lookup(self, key): |
|
439 | def lookup(self, key): | |
440 | if key == '.': |
|
440 | if key == '.': | |
441 | key, second = self.dirstate.parents() |
|
441 | key, second = self.dirstate.parents() | |
442 | if key == nullid: |
|
442 | if key == nullid: | |
443 | raise repo.RepoError(_("no revision checked out")) |
|
443 | raise repo.RepoError(_("no revision checked out")) | |
444 | if second != nullid: |
|
444 | if second != nullid: | |
445 | self.ui.warn(_("warning: working directory has two parents, " |
|
445 | self.ui.warn(_("warning: working directory has two parents, " | |
446 | "tag '.' uses the first\n")) |
|
446 | "tag '.' uses the first\n")) | |
447 | elif key == 'null': |
|
447 | elif key == 'null': | |
448 | return nullid |
|
448 | return nullid | |
449 | n = self.changelog._match(key) |
|
449 | n = self.changelog._match(key) | |
450 | if n: |
|
450 | if n: | |
451 | return n |
|
451 | return n | |
452 | if key in self.tags(): |
|
452 | if key in self.tags(): | |
453 | return self.tags()[key] |
|
453 | return self.tags()[key] | |
454 | if key in self.branchtags(): |
|
454 | if key in self.branchtags(): | |
455 | return self.branchtags()[key] |
|
455 | return self.branchtags()[key] | |
456 | n = self.changelog._partialmatch(key) |
|
456 | n = self.changelog._partialmatch(key) | |
457 | if n: |
|
457 | if n: | |
458 | return n |
|
458 | return n | |
459 | try: |
|
459 | try: | |
460 | if len(key) == 20: |
|
460 | if len(key) == 20: | |
461 | key = hex(key) |
|
461 | key = hex(key) | |
462 | except: |
|
462 | except: | |
463 | pass |
|
463 | pass | |
464 | raise repo.RepoError(_("unknown revision '%s'") % key) |
|
464 | raise repo.RepoError(_("unknown revision '%s'") % key) | |
465 |
|
465 | |||
466 | def local(self): |
|
466 | def local(self): | |
467 | return True |
|
467 | return True | |
468 |
|
468 | |||
469 | def join(self, f): |
|
469 | def join(self, f): | |
470 | return os.path.join(self.path, f) |
|
470 | return os.path.join(self.path, f) | |
471 |
|
471 | |||
472 | def sjoin(self, f): |
|
472 | def sjoin(self, f): | |
473 | f = self.encodefn(f) |
|
473 | f = self.encodefn(f) | |
474 | return os.path.join(self.spath, f) |
|
474 | return os.path.join(self.spath, f) | |
475 |
|
475 | |||
476 | def wjoin(self, f): |
|
476 | def wjoin(self, f): | |
477 | return os.path.join(self.root, f) |
|
477 | return os.path.join(self.root, f) | |
478 |
|
478 | |||
|
479 | def rjoin(self, f): | |||
|
480 | return os.path.join(self.root, util.pconvert(f)) | |||
|
481 | ||||
479 | def file(self, f): |
|
482 | def file(self, f): | |
480 | if f[0] == '/': |
|
483 | if f[0] == '/': | |
481 | f = f[1:] |
|
484 | f = f[1:] | |
482 | return filelog.filelog(self.sopener, f) |
|
485 | return filelog.filelog(self.sopener, f) | |
483 |
|
486 | |||
484 | def changectx(self, changeid=None): |
|
487 | def changectx(self, changeid=None): | |
485 | return context.changectx(self, changeid) |
|
488 | return context.changectx(self, changeid) | |
486 |
|
489 | |||
487 | def workingctx(self): |
|
490 | def workingctx(self): | |
488 | return context.workingctx(self) |
|
491 | return context.workingctx(self) | |
489 |
|
492 | |||
490 | def parents(self, changeid=None): |
|
493 | def parents(self, changeid=None): | |
491 | ''' |
|
494 | ''' | |
492 | get list of changectxs for parents of changeid or working directory |
|
495 | get list of changectxs for parents of changeid or working directory | |
493 | ''' |
|
496 | ''' | |
494 | if changeid is None: |
|
497 | if changeid is None: | |
495 | pl = self.dirstate.parents() |
|
498 | pl = self.dirstate.parents() | |
496 | else: |
|
499 | else: | |
497 | n = self.changelog.lookup(changeid) |
|
500 | n = self.changelog.lookup(changeid) | |
498 | pl = self.changelog.parents(n) |
|
501 | pl = self.changelog.parents(n) | |
499 | if pl[1] == nullid: |
|
502 | if pl[1] == nullid: | |
500 | return [self.changectx(pl[0])] |
|
503 | return [self.changectx(pl[0])] | |
501 | return [self.changectx(pl[0]), self.changectx(pl[1])] |
|
504 | return [self.changectx(pl[0]), self.changectx(pl[1])] | |
502 |
|
505 | |||
503 | def filectx(self, path, changeid=None, fileid=None): |
|
506 | def filectx(self, path, changeid=None, fileid=None): | |
504 | """changeid can be a changeset revision, node, or tag. |
|
507 | """changeid can be a changeset revision, node, or tag. | |
505 | fileid can be a file revision or node.""" |
|
508 | fileid can be a file revision or node.""" | |
506 | return context.filectx(self, path, changeid, fileid) |
|
509 | return context.filectx(self, path, changeid, fileid) | |
507 |
|
510 | |||
508 | def getcwd(self): |
|
511 | def getcwd(self): | |
509 | return self.dirstate.getcwd() |
|
512 | return self.dirstate.getcwd() | |
510 |
|
513 | |||
511 | def pathto(self, f, cwd=None): |
|
514 | def pathto(self, f, cwd=None): | |
512 | return self.dirstate.pathto(f, cwd) |
|
515 | return self.dirstate.pathto(f, cwd) | |
513 |
|
516 | |||
514 | def wfile(self, f, mode='r'): |
|
517 | def wfile(self, f, mode='r'): | |
515 | return self.wopener(f, mode) |
|
518 | return self.wopener(f, mode) | |
516 |
|
519 | |||
517 | def _link(self, f): |
|
520 | def _link(self, f): | |
518 | return os.path.islink(self.wjoin(f)) |
|
521 | return os.path.islink(self.wjoin(f)) | |
519 |
|
522 | |||

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

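A stripped-down sketch of the dispatch _filter performs: match the file name against configured patterns and run the first matching transform. fnmatch stands in for util.matcher here, and the pattern and transform are invented for illustration.

import fnmatch

filterpats = [("*.txt", lambda data: data.replace("\r\n", "\n"))]

def run_filters(filename, data):
    for pat, fn in filterpats:
        if fnmatch.fnmatch(filename, pat):
            return fn(data)       # first match wins, as in _filter
    return data

assert run_filters("notes.txt", "a\r\nb") == "a\nb"
assert run_filters("image.png", "a\r\nb") == "a\r\nb"
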

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

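A POSIX-only sketch (not part of hg.py) of the symlink convention wread follows: a link's "data" is its target path, so reading a link never touches the target's bytes. The file names below are made up and written into a temp directory.

import os, tempfile

d = tempfile.mkdtemp()
target = os.path.join(d, "real.txt")
open(target, "w").write("content")
link = os.path.join(d, "alias")
os.symlink("real.txt", link)

def wread_sketch(path):
    if os.path.islink(path):
        return os.readlink(path)      # link target, not file contents
    return open(path).read()

assert wread_sketch(link) == "real.txt"
assert wread_sketch(target) == "content"
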

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

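A sketch of the journal-to-undo rename transaction() sets up: while a transaction runs, truncation offsets are logged to "journal"; on a clean close the journal is renamed to "undo" so a later rollback() can replay it. The file names are the real ones, but the logging format shown is simplified and the directory is a temp stand-in.

import os, tempfile

d = tempfile.mkdtemp()
journal = os.path.join(d, "journal")
undo = os.path.join(d, "undo")

open(journal, "w").write("00changelog.i\x000\n")  # "file\0old-size" record
# ... transaction work happens here ...
os.rename(journal, undo)          # commit point: journal becomes undo

assert not os.path.exists(journal) and os.path.exists(undo)
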

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

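A self-contained sketch of the weakref caching lock() and wlock() use: the repository keeps only a weak reference, so the lock is released as soon as the last strong reference disappears (promptly, under CPython's refcounting), yet nested callers reuse the live lock object. The Lock class here is a stand-in.

import weakref

class Lock(object):
    pass

_lockref = None

def getlock():
    global _lockref
    if _lockref and _lockref():
        return _lockref()        # reuse the lock somebody still holds
    l = Lock()
    _lockref = weakref.ref(l)
    return l

a = getlock()
b = getlock()
assert a is b                    # nested acquisition: same lock object
del a, b                         # last strong reference gone
assert _lockref() is None        # weak reference has cleared
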

    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

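A sketch of the parent-collapse rule near the end of filecommit(): when one file parent is an ancestor of the other, the descendant alone carries the history, so the redundant parent is dropped. Node values are made up, and 'ancestor' stands in for fl.ancestor(fp1, fp2).

nullid = "\0" * 20

def collapse(fp1, fp2, ancestor):
    if ancestor == fp1:
        return fp2, nullid       # fp1 already reachable from fp2
    if ancestor == fp2:
        return fp1, nullid       # fp2 already reachable from fp1
    return fp1, fp2              # true merge: keep both parents

assert collapse("a", "b", "a") == ("b", nullid)
assert collapse("a", "b", "b") == ("a", nullid)
assert collapse("a", "b", "c") == ("a", "b")
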

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (files or match != util.always)):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock

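A toy sketch of the hook ordering commit() follows: "precommit" fires before the transaction opens, "pretxncommit" runs inside it (and may veto by raising), and "commit" only fires after the transaction has closed. The hooks here are plain callables for illustration, not Mercurial's hook machinery.

log = []

def run_commit(veto=False):
    log.append("precommit")
    # -- transaction opens --
    log.append("pretxncommit")
    if veto:
        raise RuntimeError("pretxncommit hook vetoed the commit")
    # -- transaction closes --
    log.append("commit")

run_commit()
assert log == ["precommit", "pretxncommit", "commit"]
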

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

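A sketch of the name-or-directory match walk() applies when a changeset node is given: a requested name matches itself, or anything below it when treated as a directory. The names below are invented.

def matches(requested, manifest_name):
    return (manifest_name == requested or
            manifest_name.startswith(requested + "/"))

assert matches("src", "src")            # exact file
assert matches("src", "src/hg.py")      # file inside requested directory
assert not matches("src", "srcball")    # no partial-component matches
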

    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False, list_unknown=True):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean,
                                                    list_unknown)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    mexec = lambda f: 'x' in ctx.fileflags(f)
                    mlink = lambda f: 'l' in ctx.fileflags(f)
                    is_exec = util.execfunc(self.root, mexec)
                    is_link = util.linkfunc(self.root, mlink)
                    def flags(f):
                        return is_link(f) and 'l' or is_exec(f) and 'x' or ''
                    for f in lookup:
                        if (f not in ctx or flags(f) != ctx.fileflags(f)
                            or ctx[f].cmp(self.wread(f))):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

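A sketch of the "fixup" pass above: files the dirstate flagged as possibly changed (stat differs) but whose content compares clean are split off so they can be re-marked normal, letting later status calls skip the expensive content comparison. The file names and the content_changed map are stand-ins for ctx[f].cmp().

lookup = ["a.txt", "b.txt"]
content_changed = {"a.txt": True, "b.txt": False}

modified, fixup = [], []
for f in lookup:
    if content_changed[f]:
        modified.append(f)
    else:
        fixup.append(f)          # clean after all: refresh in the dirstate

assert modified == ["a.txt"] and fixup == ["b.txt"]
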

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

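A sketch of the dirstate state letters these methods branch on: 'n' normal, 'a' added, 'r' removed, 'm' merged, with untracked files simply absent from the map. The dirstate dict and can_add helper below are illustrative stand-ins, not the real dirstate API.

dirstate = {"tracked.txt": "n", "new.txt": "a", "gone.txt": "r"}

def can_add(f):
    state = dirstate.get(f, "?")
    if state in "amn":
        return False              # already tracked
    return True                   # untracked, or scheduled for removal

assert not can_add("tracked.txt")
assert can_add("missing.txt")
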

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

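A minimal sketch of the deletion idiom remove() uses when unlink=True: unlinking a file that is already gone is tolerated (ENOENT is swallowed), while any other OS error still surfaces. The path is a made-up temp file; Python 2 except syntax matches the listing.

import errno, os, tempfile

def unlink_quietly(path):
    try:
        os.unlink(path)
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise                 # e.g. permission problems still surface

p = os.path.join(tempfile.mkdtemp(), "gone.txt")
unlink_quietly(p)                 # no such file: silently tolerated
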

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

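A toy run of the branch-head scan described in the comment above: walk revisions from the branch tip downwards; a revision on the branch that is not a recorded ancestor of a known head is itself a head. The DAG below is hypothetical (rev 4 descends from 2, rev 3 from 1, and -1 marks a null parent).

parents = {4: (2, -1), 3: (1, -1), 2: (1, -1), 1: (0, -1), 0: (-1, -1)}
branch_of = {0: "b", 1: "b", 2: "b", 3: "b", 4: "b"}

tip = 4
heads = [tip]
ancestors = set(parents[tip])
for rev in range(tip - 1, -1, -1):
    if rev in ancestors:
        ancestors.update(parents[rev])
        ancestors.discard(rev)
    elif branch_of[rev] == "b":
        heads.append(rev)             # not an ancestor: a second head
        ancestors.update(parents[rev])

assert sorted(heads) == [3, 4]        # rev 3 is an independent head
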

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

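A sketch of the exponential sampling between() performs on a linear chain: walking from top towards bottom it keeps the nodes at distances 1, 2, 4, 8, ... from the top, which is the spacing the discovery protocol's binary search needs. The chain numbering is hypothetical.

def sample(top, bottom, parent):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            l.append(n)       # keep this node, then double the stride
            f *= 2
        n = parent[n]
        i += 1
    return l

# chain 10 -> 9 -> ... -> 0
parent = dict((k, k - 1) for k in range(1, 11))
assert sample(10, 0, parent) == [9, 8, 6, 2]
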
1277 |
|
1280 | |||
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but have no children common to both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

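    # Editor's note (illustrative walk-through, not part of the original
    # module), assuming a linear history where the remote has two
    # changesets we lack:
    #
    #     a --- b --- c --- d --- e      remote (head e)
    #     a --- b --- c                  self
    #
    # remote.branches([e]) reports the branch (e, a, nullid, nullid); since
    # its root a is already known locally, the range e:a is scheduled for
    # the binary search, which narrows it until it finds the c/d boundary.
    # The call leaves base == {c: 1} and returns [d], the root of the
    # subset missing from self.
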
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

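    # Editor's note (illustrative, not part of the original module):
    # continuing the sketch above, if self also holds local-only work
    #
    #     a --- b --- c --- x --- y      self (x and y unknown to remote)
    #
    # then after findincoming fills base with {c: 1}, the pruning loop
    # removes a, b and c from remain, leaving {x, y}; x is the only node
    # both of whose parents were pruned, so findoutgoing returns [x], the
    # root of the subset to push.
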
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

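    # Editor's note (illustrative, not part of the original module): the
    # new-remote-heads check in prepush refuses, without --force, a push
    # such as
    #
    #                 ,-- x     local head
    #     o --- o --- r         r = the sole remote head
    #                 `-- y     local head
    #
    # where two local heads x and y both descend from the single remote
    # head r: len(heads) == 2 > len(remote_heads) == 1, so the push would
    # leave the remote with two heads and is aborted.
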
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # The local repo finds the heads on the server and figures out
        # which revs it must push. Once the revs are transferred, if the
        # server finds it has different heads (someone else won the
        # commit/push race), it aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

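        # Editor's note (illustrative sketch, not part of the original
        # module; fnode, mnode and linknode are hypothetical binary node
        # ids): an extranodes argument forcing one extra filelog node for
        # 'foo' and one extra manifest node into the group could look like
        #
        #     extranodes = {
        #         'foo': [(fnode, linknode)],
        #         1:     [(mnode, linknode)],
        #     }
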
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

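        # Editor's note (illustrative, not part of the original module):
        # prune_parents relies on the revlog invariant that a revision is
        # never stored without its parents: if the recipient has node n, it
        # necessarily has every ancestor of n, so all of them can be marked
        # as present and dropped from the missing set in one sweep.
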
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

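    # Editor's note (illustrative, not part of the original module): given
    # roots such as the [x] computed by findoutgoing in the sketch above,
    # self.changegroup([x], 'push') bundles x, its descendants, and every
    # manifest and filelog revision whose linkrev points at one of them;
    # no pruning pass is needed because the recipient is assumed to
    # already hold everything else.
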
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
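        # Editor's note (illustrative, not part of the original module):
        # with the arithmetic at the end of this method, a group growing
        # the repository from 1 head to 3 returns (3 - 1) + 1 == 3, one
        # shrinking it from 3 heads to 1 returns (1 - 3) - 1 == -3, and
        # one leaving the head count unchanged returns 1.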
1965 | def csmap(x): |
|
1968 | def csmap(x): | |
1966 | self.ui.debug(_("add changeset %s\n") % short(x)) |
|
1969 | self.ui.debug(_("add changeset %s\n") % short(x)) | |
1967 | return cl.count() |
|
1970 | return cl.count() | |
1968 |
|
1971 | |||
1969 | def revmap(x): |
|
1972 | def revmap(x): | |
1970 | return cl.rev(x) |
|
1973 | return cl.rev(x) | |
1971 |
|
1974 | |||
1972 | if not source: |
|
1975 | if not source: | |
1973 | return 0 |
|
1976 | return 0 | |
1974 |
|
1977 | |||
1975 | self.hook('prechangegroup', throw=True, source=srctype, url=url) |
|
1978 | self.hook('prechangegroup', throw=True, source=srctype, url=url) | |
1976 |
|
1979 | |||
1977 | changesets = files = revisions = 0 |
|
1980 | changesets = files = revisions = 0 | |
1978 |
|
1981 | |||
1979 | # write changelog data to temp files so concurrent readers will not see |
|
1982 | # write changelog data to temp files so concurrent readers will not see | |
1980 | # inconsistent view |
|
1983 | # inconsistent view | |
1981 | cl = self.changelog |
|
1984 | cl = self.changelog | |
1982 | cl.delayupdate() |
|
1985 | cl.delayupdate() | |
1983 | oldheads = len(cl.heads()) |
|
1986 | oldheads = len(cl.heads()) | |
1984 |
|
1987 | |||
1985 | tr = self.transaction() |
|
1988 | tr = self.transaction() | |
1986 | try: |
|
1989 | try: | |
1987 | trp = weakref.proxy(tr) |
|
1990 | trp = weakref.proxy(tr) | |
1988 | # pull off the changeset group |
|
1991 | # pull off the changeset group | |
1989 | self.ui.status(_("adding changesets\n")) |
|
1992 | self.ui.status(_("adding changesets\n")) | |
1990 | cor = cl.count() - 1 |
|
1993 | cor = cl.count() - 1 | |
1991 | chunkiter = changegroup.chunkiter(source) |
|
1994 | chunkiter = changegroup.chunkiter(source) | |
1992 | if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok: |
|
1995 | if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok: | |
1993 | raise util.Abort(_("received changelog group is empty")) |
|
1996 | raise util.Abort(_("received changelog group is empty")) | |
1994 | cnr = cl.count() - 1 |
|
1997 | cnr = cl.count() - 1 | |
1995 | changesets = cnr - cor |
|
1998 | changesets = cnr - cor | |
1996 |
|
1999 | |||
1997 | # pull off the manifest group |
|
2000 | # pull off the manifest group | |
1998 | self.ui.status(_("adding manifests\n")) |
|
2001 | self.ui.status(_("adding manifests\n")) | |
1999 | chunkiter = changegroup.chunkiter(source) |
|
2002 | chunkiter = changegroup.chunkiter(source) | |
2000 | # no need to check for empty manifest group here: |
|
2003 | # no need to check for empty manifest group here: | |
2001 | # if the result of the merge of 1 and 2 is the same in 3 and 4, |
|
2004 | # if the result of the merge of 1 and 2 is the same in 3 and 4, | |
2002 | # no new manifest will be created and the manifest group will |
|
2005 | # no new manifest will be created and the manifest group will | |
2003 | # be empty during the pull |
|
2006 | # be empty during the pull | |
2004 | self.manifest.addgroup(chunkiter, revmap, trp) |
|
2007 | self.manifest.addgroup(chunkiter, revmap, trp) | |
2005 |
|
2008 | |||
2006 | # process the files |
|
2009 | # process the files | |
2007 | self.ui.status(_("adding file changes\n")) |
|
2010 | self.ui.status(_("adding file changes\n")) | |
2008 | while 1: |
|
2011 | while 1: | |
2009 | f = changegroup.getchunk(source) |
|
2012 | f = changegroup.getchunk(source) | |
2010 | if not f: |
|
2013 | if not f: | |
2011 | break |
|
2014 | break | |
2012 | self.ui.debug(_("adding %s revisions\n") % f) |
|
2015 | self.ui.debug(_("adding %s revisions\n") % f) | |
2013 | fl = self.file(f) |
|
2016 | fl = self.file(f) | |
2014 | o = fl.count() |
|
2017 | o = fl.count() | |
2015 | chunkiter = changegroup.chunkiter(source) |
|
2018 | chunkiter = changegroup.chunkiter(source) | |
2016 | if fl.addgroup(chunkiter, revmap, trp) is None: |
|
2019 | if fl.addgroup(chunkiter, revmap, trp) is None: | |
2017 | raise util.Abort(_("received file revlog group is empty")) |
|
2020 | raise util.Abort(_("received file revlog group is empty")) | |
2018 | revisions += fl.count() - o |
|
2021 | revisions += fl.count() - o | |
2019 | files += 1 |
|
2022 | files += 1 | |
2020 |
|
2023 | |||
2021 | # make changelog see real files again |
|
2024 | # make changelog see real files again | |
2022 | cl.finalize(trp) |
|
2025 | cl.finalize(trp) | |
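delayupdate() and finalize() bracket the whole import: until finalize runs, changelog appends are buffered outside the main file, so a concurrent reader never sees a changeset whose manifest and file revisions have not landed yet. A toy illustration of the same buffer-then-publish idea (the class and '.pending' suffix are hypothetical, not Mercurial's API):

    import os

    class delayedappend(object):
        # buffer appends beside the real file, publish them in one step
        def __init__(self, path):
            self.path = path
            self.pending = path + '.pending'   # hypothetical side file
            open(self.pending, 'wb').close()

        def append(self, data):
            f = open(self.pending, 'ab')       # invisible to readers of path
            f.write(data)
            f.close()

        def finalize(self):
            f = open(self.path, 'ab')
            p = open(self.pending, 'rb')
            f.write(p.read())                  # publish everything at once
            p.close()
            f.close()
            os.unlink(self.pending)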
2023 |
|
2026 | |||
2024 | newheads = len(self.changelog.heads()) |
|
2027 | newheads = len(self.changelog.heads()) | |
2025 | heads = "" |
|
2028 | heads = "" | |
2026 | if oldheads and newheads != oldheads: |
|
2029 | if oldheads and newheads != oldheads: | |
2027 | heads = _(" (%+d heads)") % (newheads - oldheads) |
|
2030 | heads = _(" (%+d heads)") % (newheads - oldheads) | |
2028 |
|
2031 | |||
2029 | self.ui.status(_("added %d changesets" |
|
2032 | self.ui.status(_("added %d changesets" | |
2030 | " with %d changes to %d files%s\n") |
|
2033 | " with %d changes to %d files%s\n") | |
2031 | % (changesets, revisions, files, heads)) |
|
2034 | % (changesets, revisions, files, heads)) | |
2032 |
|
2035 | |||
2033 | if changesets > 0: |
|
2036 | if changesets > 0: | |
2034 | self.hook('pretxnchangegroup', throw=True, |
|
2037 | self.hook('pretxnchangegroup', throw=True, | |
2035 | node=hex(self.changelog.node(cor+1)), source=srctype, |
|
2038 | node=hex(self.changelog.node(cor+1)), source=srctype, | |
2036 | url=url) |
|
2039 | url=url) | |
2037 |
|
2040 | |||
2038 | tr.close() |
|
2041 | tr.close() | |
2039 | finally: |
|
2042 | finally: | |
2040 | del tr |
|
2043 | del tr | |
2041 |
|
2044 | |||
2042 | if changesets > 0: |
|
2045 | if changesets > 0: | |
2043 | # forcefully update the on-disk branch cache |
|
2046 | # forcefully update the on-disk branch cache | |
2044 | self.ui.debug(_("updating the branch cache\n")) |
|
2047 | self.ui.debug(_("updating the branch cache\n")) | |
2045 | self.branchtags() |
|
2048 | self.branchtags() | |
2046 | self.hook("changegroup", node=hex(self.changelog.node(cor+1)), |
|
2049 | self.hook("changegroup", node=hex(self.changelog.node(cor+1)), | |
2047 | source=srctype, url=url) |
|
2050 | source=srctype, url=url) | |
2048 |
|
2051 | |||
2049 | for i in xrange(cor + 1, cnr + 1): |
|
2052 | for i in xrange(cor + 1, cnr + 1): | |
2050 | self.hook("incoming", node=hex(self.changelog.node(i)), |
|
2053 | self.hook("incoming", node=hex(self.changelog.node(i)), | |
2051 | source=srctype, url=url) |
|
2054 | source=srctype, url=url) | |
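Note the ordering of the hooks: prechangegroup fires before any data is read, pretxnchangegroup fires inside the still-open transaction (so a failing hook rolls the whole group back), and changegroup plus one incoming per changeset fire only after tr.close(). A sketch of an in-process pretxnchangegroup hook, wired up in hgrc as pretxnchangegroup.nomultiheads = python:myhooks.nomultiheads (module name and policy are illustrative):

    # myhooks.py - illustrative hook module (hypothetical name)
    def nomultiheads(ui, repo, node=None, **kwargs):
        # called inside the transaction; a truthy return value aborts
        # the push/pull and the entire changegroup is rolled back
        heads = repo.heads()
        if len(heads) > 1:
            ui.warn('rejecting changegroup: repository would have '
                    '%d heads\n' % len(heads))
            return True
        return False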
2052 |
|
2055 | |||
2053 | # never return 0 here: |
|
2056 | # never return 0 here: | |
2054 | if newheads < oldheads: |
|
2057 | if newheads < oldheads: | |
2055 | return newheads - oldheads - 1 |
|
2058 | return newheads - oldheads - 1 | |
2056 | else: |
|
2059 | else: | |
2057 | return newheads - oldheads + 1 |
|
2060 | return newheads - oldheads + 1 | |
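The asymmetric encoding above keeps zero reserved for "nothing happened", so callers can test truthiness and still recover the head delta. A small sketch of decoding it, for illustration only:

    def describe_result(ret):
        # inverts the convention from addchangegroup's docstring
        if ret > 1:
            return 'added %d heads' % (ret - 1)
        if ret == 1:
            return 'head count unchanged'
        if ret < 0:
            return 'removed %d heads' % (-ret - 1)
        return 'no changesets received'   # ret == 0: empty source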
2058 |
|
2061 | |||
2059 |
|
2062 | |||
2060 | def stream_in(self, remote): |
|
2063 | def stream_in(self, remote): | |
2061 | fp = remote.stream_out() |
|
2064 | fp = remote.stream_out() | |
2062 | l = fp.readline() |
|
2065 | l = fp.readline() | |
2063 | try: |
|
2066 | try: | |
2064 | resp = int(l) |
|
2067 | resp = int(l) | |
2065 | except ValueError: |
|
2068 | except ValueError: | |
2066 | raise util.UnexpectedOutput( |
|
2069 | raise util.UnexpectedOutput( | |
2067 | _('Unexpected response from remote server:'), l) |
|
2070 | _('Unexpected response from remote server:'), l) | |
2068 | if resp == 1: |
|
2071 | if resp == 1: | |
2069 | raise util.Abort(_('operation forbidden by server')) |
|
2072 | raise util.Abort(_('operation forbidden by server')) | |
2070 | elif resp == 2: |
|
2073 | elif resp == 2: | |
2071 | raise util.Abort(_('locking the remote repository failed')) |
|
2074 | raise util.Abort(_('locking the remote repository failed')) | |
2072 | elif resp != 0: |
|
2075 | elif resp != 0: | |
2073 | raise util.Abort(_('the server sent an unknown error code')) |
|
2076 | raise util.Abort(_('the server sent an unknown error code')) | |
2074 | self.ui.status(_('streaming all changes\n')) |
|
2077 | self.ui.status(_('streaming all changes\n')) | |
2075 | l = fp.readline() |
|
2078 | l = fp.readline() | |
2076 | try: |
|
2079 | try: | |
2077 | total_files, total_bytes = map(int, l.split(' ', 1)) |
|
2080 | total_files, total_bytes = map(int, l.split(' ', 1)) | |
2078 | except (ValueError, TypeError): |
|
2081 | except (ValueError, TypeError): | |
2079 | raise util.UnexpectedOutput( |
|
2082 | raise util.UnexpectedOutput( | |
2080 | _('Unexpected response from remote server:'), l) |
|
2083 | _('Unexpected response from remote server:'), l) | |
2081 | self.ui.status(_('%d files to transfer, %s of data\n') % |
|
2084 | self.ui.status(_('%d files to transfer, %s of data\n') % | |
2082 | (total_files, util.bytecount(total_bytes))) |
|
2085 | (total_files, util.bytecount(total_bytes))) | |
2083 | start = time.time() |
|
2086 | start = time.time() | |
2084 | for i in xrange(total_files): |
|
2087 | for i in xrange(total_files): | |
2085 | # XXX doesn't support '\n' or '\r' in filenames |
|
2088 | # XXX doesn't support '\n' or '\r' in filenames | |
2086 | l = fp.readline() |
|
2089 | l = fp.readline() | |
2087 | try: |
|
2090 | try: | |
2088 | name, size = l.split('\0', 1) |
|
2091 | name, size = l.split('\0', 1) | |
2089 | size = int(size) |
|
2092 | size = int(size) | |
2090 | except (ValueError, TypeError): |
|
2093 | except (ValueError, TypeError): | |
2091 | raise util.UnexpectedOutput( |
|
2094 | raise util.UnexpectedOutput( | |
2092 | _('Unexpected response from remote server:'), l) |
|
2095 | _('Unexpected response from remote server:'), l) | |
2093 | self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size))) |
|
2096 | self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size))) | |
2094 | ofp = self.sopener(name, 'w') |
|
2097 | ofp = self.sopener(name, 'w') | |
2095 | for chunk in util.filechunkiter(fp, limit=size): |
|
2098 | for chunk in util.filechunkiter(fp, limit=size): | |
2096 | ofp.write(chunk) |
|
2099 | ofp.write(chunk) | |
2097 | ofp.close() |
|
2100 | ofp.close() | |
2098 | elapsed = time.time() - start |
|
2101 | elapsed = time.time() - start | |
2099 | if elapsed <= 0: |
|
2102 | if elapsed <= 0: | |
2100 | elapsed = 0.001 |
|
2103 | elapsed = 0.001 | |
2101 | self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') % |
|
2104 | self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') % | |
2102 | (util.bytecount(total_bytes), elapsed, |
|
2105 | (util.bytecount(total_bytes), elapsed, | |
2103 | util.bytecount(total_bytes / elapsed))) |
|
2106 | util.bytecount(total_bytes / elapsed))) | |
2104 | self.invalidate() |
|
2107 | self.invalidate() | |
2105 | return len(self.heads()) + 1 |
|
2108 | return len(self.heads()) + 1 | |
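For reference, the wire format stream_in consumes is: one status line (0 ok, 1 operation forbidden, 2 remote lock failed), one '<total_files> <total_bytes>' line, then for each file a '<name>\0<size>' line followed by exactly size raw bytes. A minimal sketch of the sending side under those assumptions (not the actual stream_out implementation):

    import os

    def stream_out_sketch(files, fp):
        # 'files' is assumed to be a list of (wire_name, local_path) pairs
        fp.write('0\n')                              # status: 0 = OK
        sized = [(n, p, os.path.getsize(p)) for n, p in files]
        fp.write('%d %d\n' % (len(sized), sum(s for _, _, s in sized)))
        for name, path, size in sized:
            # a '\n' or '\r' in name would break the readline framing,
            # which is exactly the XXX caveat in the reader above
            fp.write('%s\0%d\n' % (name, size))
            f = open(path, 'rb')
            fp.write(f.read())
            f.close()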
2106 |
|
2109 | |||
2107 | def clone(self, remote, heads=[], stream=False): |
|
2110 | def clone(self, remote, heads=[], stream=False): | |
2108 | '''clone remote repository. |
|
2111 | '''clone remote repository. | |
2109 |
|
2112 | |||
2110 | keyword arguments: |
|
2113 | keyword arguments: | |
2111 | heads: list of revs to clone (forces use of pull) |
|
2114 | heads: list of revs to clone (forces use of pull) | |
2112 | stream: use streaming clone if possible''' |
|
2115 | stream: use streaming clone if possible''' | |
2113 |
|
2116 | |||
2114 | # now, all clients that can request uncompressed clones can |
|
2117 | # now, all clients that can request uncompressed clones can | |
2115 | # read repo formats supported by all servers that can serve |
|
2118 | # read repo formats supported by all servers that can serve | |
2116 | # them. |
|
2119 | # them. | |
2117 |
|
2120 | |||
2118 | # if revlog format changes, client will have to check version |
|
2121 | # if revlog format changes, client will have to check version | |
2119 | # and format flags on "stream" capability, and use |
|
2122 | # and format flags on "stream" capability, and use | |
2120 | # uncompressed only if compatible. |
|
2123 | # uncompressed only if compatible. | |
2121 |
|
2124 | |||
2122 | if stream and not heads and remote.capable('stream'): |
|
2125 | if stream and not heads and remote.capable('stream'): | |
2123 | return self.stream_in(remote) |
|
2126 | return self.stream_in(remote) | |
2124 | return self.pull(remote, heads) |
|
2127 | return self.pull(remote, heads) | |
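The comment above anticipates a versioned 'stream' capability; since capable() returns either True (bare capability) or the string after '=', the gate could look like this sketch (the '1' format flag is an assumption, not part of this code):

    def can_stream_clone(remote, stream, heads):
        if not stream or heads:
            return False                  # explicit revs force a pull
        cap = remote.capable('stream')
        if cap is True:                   # bare 'stream': plain streaming
            return True
        if cap:                           # e.g. 'stream=1': check the flag
            return '1' in cap.split(',')
        return False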
2125 |
|
2128 | |||
2126 | # used to avoid circular references so destructors work |
|
2129 | # used to avoid circular references so destructors work | |
2127 | def aftertrans(files): |
|
2130 | def aftertrans(files): | |
2128 | renamefiles = [tuple(t) for t in files] |
|
2131 | renamefiles = [tuple(t) for t in files] | |
2129 | def a(): |
|
2132 | def a(): | |
2130 | for src, dest in renamefiles: |
|
2133 | for src, dest in renamefiles: | |
2131 | util.rename(src, dest) |
|
2134 | util.rename(src, dest) | |
2132 | return a |
|
2135 | return a | |
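aftertrans copies the rename pairs into a fresh list so the returned closure references only its own data: if it closed over the repository, the repo -> transaction -> callback -> repo cycle would contain objects with __del__ methods, which the CPython collector of this era never reclaimed. The same pattern in isolation (file names are illustrative):

    import os

    def make_renamer(files):
        # snapshot now; the caller may mutate 'files' later, and the
        # closure must not keep a reference back to its creator
        renamefiles = [tuple(t) for t in files]
        def a():
            for src, dest in renamefiles:
                os.rename(src, dest)      # util.rename also handles EEXIST
        return a

    undo = make_renamer([('journal.dirstate', 'undo.dirstate')])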
2133 |
|
2136 | |||
2134 | def instance(ui, path, create): |
|
2137 | def instance(ui, path, create): | |
2135 | return localrepository(ui, util.drop_scheme('file', path), create) |
|
2138 | return localrepository(ui, util.drop_scheme('file', path), create) | |
2136 |
|
2139 | |||
2137 | def islocal(path): |
|
2140 | def islocal(path): | |
2138 | return True |
|
2141 | return True |
@@ -1,42 +1,48 b'' | |||||
1 | # repo.py - repository base classes for mercurial |
|
1 | # repo.py - repository base classes for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> | |
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms |
|
6 | # This software may be used and distributed according to the terms | |
7 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | # of the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 | from i18n import _ |
|
9 | from i18n import _ | |
10 |
|
10 | |||
11 | class RepoError(Exception): |
|
11 | class RepoError(Exception): | |
12 | pass |
|
12 | pass | |
13 |
|
13 | |||
14 | class NoCapability(RepoError): |
|
14 | class NoCapability(RepoError): | |
15 | pass |
|
15 | pass | |
16 |
|
16 | |||
17 | class repository(object): |
|
17 | class repository(object): | |
18 | def capable(self, name): |
|
18 | def capable(self, name): | |
19 | '''tell whether repo supports named capability. |
|
19 | '''tell whether repo supports named capability. | |
20 | return False if not supported. |
|
20 | return False if not supported. | |
21 | if boolean capability, return True. |
|
21 | if boolean capability, return True. | |
22 | if string capability, return string.''' |
|
22 | if string capability, return string.''' | |
23 | if name in self.capabilities: |
|
23 | if name in self.capabilities: | |
24 | return True |
|
24 | return True | |
25 | name_eq = name + '=' |
|
25 | name_eq = name + '=' | |
26 | for cap in self.capabilities: |
|
26 | for cap in self.capabilities: | |
27 | if cap.startswith(name_eq): |
|
27 | if cap.startswith(name_eq): | |
28 | return cap[len(name_eq):] |
|
28 | return cap[len(name_eq):] | |
29 | return False |
|
29 | return False | |
30 |
|
30 | |||
31 | def requirecap(self, name, purpose): |
|
31 | def requirecap(self, name, purpose): | |
32 | '''raise an exception if the given capability is not present''' |
|
32 | '''raise an exception if the given capability is not present''' | |
33 | if not self.capable(name): |
|
33 | if not self.capable(name): | |
34 | raise NoCapability(_('cannot %s; remote repository does not ' |
|
34 | raise NoCapability(_('cannot %s; remote repository does not ' | |
35 | 'support the %r capability') % |
|
35 | 'support the %r capability') % | |
36 | (purpose, name)) |
|
36 | (purpose, name)) | |
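capable() therefore distinguishes boolean capabilities ('lookup') from valued ones ('unbundle=...'), and requirecap() turns an absent capability into a NoCapability error. A usage sketch with illustrative capability strings:

    class fakerepo(repository):
        # illustrative subclass; real peers fill this from the server
        capabilities = ['lookup', 'unbundle=HG10GZ,HG10BZ,HG10UN']

    r = fakerepo()
    print r.capable('lookup')     # True  (boolean capability)
    print r.capable('unbundle')   # 'HG10GZ,HG10BZ,HG10UN' (string value)
    print r.capable('stream')     # False (not advertised)
    r.requirecap('stream', 'stream a clone')   # raises NoCapability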
37 |
|
37 | |||
38 | def local(self): |
|
38 | def local(self): | |
39 | return False |
|
39 | return False | |
40 |
|
40 | |||
41 | def cancopy(self): |
|
41 | def cancopy(self): | |
42 | return self.local() |
|
42 | return self.local() | |
|
43 | ||||
|
44 | def rjoin(self, path): | |||
|
45 | url = self.url() | |||
|
46 | if url.endswith('/'): | |||
|
47 | return url + path | |||
|
48 | return url + '/' + path |
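The new rjoin method appends a path to the repository URL while avoiding a doubled slash; for a repo whose url() is 'http://example.com/repo' (an illustrative URL):

    repo.rjoin('.hg/requires')   # -> 'http://example.com/repo/.hg/requires'
    # with url() == 'http://example.com/repo/' the result is identical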