add "requires" file to the repo, specifying the requirements
Benoit Boissinot -
r3851:8f18e31c default
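The change, as the diffs below show: hg init now writes a .hg/requires file listing the format features the repository depends on (currently just "revlogv1"); opening an existing repository reads that file and aborts on any entry not in localrepository.supported, while a missing file is accepted for compatibility with older repositories; clone copies the file into the destination; and a new test (with its expected output) exercises both the missing-file and unknown-requirement paths.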
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+mkdir t
+cd t
+hg init
+echo a > a
+hg add a
+hg commit -m test -d "1000000 0"
+rm .hg/requires
+hg tip
+echo indoor-pool > .hg/requires
+hg tip
+
+true
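The expected output below shows both sides of the check: once .hg/requires is removed, hg tip still works (a repository without a requires file is treated as an old, requirement-free one), while the bogus indoor-pool entry makes Mercurial abort.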
@@ -0,0 +1,7 @@
+changeset:   0:0acdaf898367
+tag:         tip
+user:        test
+date:        Mon Jan 12 13:46:40 1970 +0000
+summary:     test
+
+abort: requirement 'indoor-pool' not supported!
@@ -1,260 +1,266 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 from node import *
 from repo import *
 from demandload import *
 from i18n import gettext as _
 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")

 def _local(path):
     return (os.path.isfile(util.drop_scheme('file', path)) and
             bundlerepo or localrepo)

 schemes = {
     'bundle': bundlerepo,
     'file': _local,
     'hg': httprepo,
     'http': httprepo,
     'https': httprepo,
     'old-http': statichttprepo,
     'ssh': sshrepo,
     'static-http': statichttprepo,
     }

 def _lookup(path):
     scheme = 'file'
     if path:
         c = path.find(':')
         if c > 0:
             scheme = path[:c]
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         return thing

 def islocal(repo):
     '''return true if repo or path is local'''
     if isinstance(repo, str):
         try:
             return _lookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()

 repo_setup_hooks = []

 def repository(ui, path='', create=False):
     """return a repository object for the specified path"""
     repo = _lookup(path).instance(ui, path, create)
     for hook in repo_setup_hooks:
         hook(ui, repo)
     return repo

 def defaultdest(source):
     '''return default destination of clone if none is given'''
     return os.path.basename(os.path.normpath(source))

 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
           stream=False):
     """Make a copy of an existing repository.

     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository objects, the source and
     newly created destination.

     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.

     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.

     Arguments:

     source: repository object or URL

     dest: URL of destination repository to create (defaults to base
     name of source repository)

     pull: always pull from source repository, even in local case

     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)

     rev: revision to clone up to (implies pull=True)

     update: update working directory after clone completes, if
     destination is local repository
     """
     if isinstance(source, str):
         src_repo = repository(ui, source)
     else:
         src_repo = source
         source = src_repo.url()

     if dest is None:
         dest = defaultdest(source)

     def localpath(path):
         if path.startswith('file://'):
             return path[7:]
         if path.startswith('file:'):
             return path[5:]
         return path

     dest = localpath(dest)
     source = localpath(source)

     if os.path.exists(dest):
         raise util.Abort(_("destination '%s' already exists") % dest)

     class DirCleanup(object):
         def __init__(self, dir_):
             self.rmtree = shutil.rmtree
             self.dir_ = dir_
         def close(self):
             self.dir_ = None
         def __del__(self):
             if self.dir_:
                 self.rmtree(self.dir_, True)

     dir_cleanup = None
     if islocal(dest):
         dir_cleanup = DirCleanup(dest)

     abspath = source
     copy = False
     if src_repo.local() and islocal(dest):
         abspath = os.path.abspath(source)
         copy = not pull and not rev

     src_lock, dest_lock = None, None
     if copy:
         try:
             # we use a lock here because if we race with commit, we
             # can end up with extra data in the cloned revlogs that's
             # not pointed to by changesets, thus causing verify to
             # fail
             src_lock = src_repo.lock()
         except lock.LockException:
             copy = False

     if copy:
-        # we lock here to avoid premature writing to the target
+        def force_copy(src, dst):
+            try:
+                util.copyfiles(src, dst)
+            except OSError, inst:
+                if inst.errno != errno.ENOENT:
+                    raise
+
         src_store = os.path.realpath(src_repo.spath)
         dest_path = os.path.realpath(os.path.join(dest, ".hg"))
         dest_store = dest_path
         if not os.path.exists(dest):
             os.mkdir(dest)
         os.mkdir(dest_path)
+        # copy the requires file
+        force_copy(src_repo.join("requires"),
+                   os.path.join(dest_path, "requires"))
+        # we lock here to avoid premature writing to the target
         dest_lock = lock.lock(os.path.join(dest_store, "lock"))

         files = ("data",
                  "00manifest.d", "00manifest.i",
                  "00changelog.d", "00changelog.i")
         for f in files:
             src = os.path.join(src_store, f)
             dst = os.path.join(dest_store, f)
-            try:
-                util.copyfiles(src, dst)
-            except OSError, inst:
-                if inst.errno != errno.ENOENT:
-                    raise
+            force_copy(src, dst)

         # we need to re-init the repo after manually copying the data
         # into it
         dest_repo = repository(ui, dest)

     else:
         dest_repo = repository(ui, dest, create=True)

     revs = None
     if rev:
         if 'lookup' not in src_repo.capabilities:
             raise util.Abort(_("src repository does not support revision "
                                "lookup and so doesn't support clone by "
                                "revision"))
         revs = [src_repo.lookup(r) for r in rev]

     if dest_repo.local():
         dest_repo.clone(src_repo, heads=revs, stream=stream)
     elif src_repo.local():
         src_repo.push(dest_repo, revs=revs)
     else:
         raise util.Abort(_("clone from remote to remote not supported"))

     if src_lock:
         src_lock.release()

     if dest_repo.local():
         fp = dest_repo.opener("hgrc", "w", text=True)
         fp.write("[paths]\n")
         fp.write("default = %s\n" % abspath)
         fp.close()

     if dest_lock:
         dest_lock.release()

     if update:
         _update(dest_repo, dest_repo.changelog.tip())
     if dir_cleanup:
         dir_cleanup.close()

     return src_repo, dest_repo

 def _showstats(repo, stats):
     stats = ((stats[0], _("updated")),
              (stats[1], _("merged")),
              (stats[2], _("removed")),
              (stats[3], _("unresolved")))
     note = ", ".join([_("%d files %s") % s for s in stats])
     repo.ui.status("%s\n" % note)

 def _update(repo, node): return update(repo, node)

 def update(repo, node):
     """update the working directory to node, merging linear changes"""
     stats = _merge.update(repo, node, False, False, None, None)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("There are unresolved merges with"
                          " locally modified files.\n"))
     return stats[3]

 def clean(repo, node, wlock=None, show_stats=True):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = _merge.update(repo, node, False, True, None, wlock)
     if show_stats: _showstats(repo, stats)
     return stats[3]

 def merge(repo, node, force=None, remind=True, wlock=None):
     """branch merge with node, resolving changes"""
     stats = _merge.update(repo, node, True, force, False, wlock)
     _showstats(repo, stats)
     if stats[3]:
         pl = repo.parents()
         repo.ui.status(_("There are unresolved merges,"
                          " you can redo the full merge using:\n"
                          " hg update -C %s\n"
                          " hg merge %s\n")
                        % (pl[0].rev(), pl[1].rev()))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3]

 def revert(repo, node, choose, wlock):
     """revert changes to revision in node without updating dirstate"""
     return _merge.update(repo, node, False, True, choose, wlock)[3]

 def verify(repo):
     """verify the consistency of a repository"""
     return _verify.verify(repo)
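The force_copy helper introduced above tolerates exactly one failure mode, a missing source file, so cloning a repository created before this change (which has no requires file, and may lack optional store files) still succeeds while any other copy error propagates. A minimal standalone sketch of the same pattern, in modern Python syntax rather than the Python 2 syntax of the code above (shutil.copy stands in for Mercurial's util.copyfiles):

    import errno
    import shutil

    def force_copy(src, dst):
        # copy src to dst, treating a missing source as a no-op;
        # every other OS error is re-raised
        try:
            shutil.copy(src, dst)
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise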
@@ -1,1929 +1,1947
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19 supported = ('revlogv1',)
19
20
20 def __del__(self):
21 def __del__(self):
21 self.transhandle = None
22 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
23 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
24 repo.repository.__init__(self)
24 if not path:
25 if not path:
25 p = os.getcwd()
26 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
28 oldp = p
28 p = os.path.dirname(p)
29 p = os.path.dirname(p)
29 if p == oldp:
30 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
31 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
32 " here (.hg not found)"))
32 path = p
33 path = p
33
34
34 self.path = os.path.join(path, ".hg")
35 self.path = os.path.join(path, ".hg")
35 self.root = os.path.realpath(path)
36 self.root = os.path.realpath(path)
36 self.origroot = path
37 self.origroot = path
37 self.opener = util.opener(self.path)
38 self.opener = util.opener(self.path)
38 self.wopener = util.opener(self.root)
39 self.wopener = util.opener(self.root)
39
40
40 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
41 if create:
42 if create:
42 if not os.path.exists(path):
43 if not os.path.exists(path):
43 os.mkdir(path)
44 os.mkdir(path)
44 os.mkdir(self.path)
45 os.mkdir(self.path)
45 #if self.spath != self.path:
46 #if self.spath != self.path:
46 # os.mkdir(self.spath)
47 # os.mkdir(self.spath)
48 requirements = ("revlogv1",)
49 reqfile = self.opener("requires", "w")
50 for r in requirements:
51 reqfile.write("%s\n" % r)
52 reqfile.close()
47 else:
53 else:
48 raise repo.RepoError(_("repository %s not found") % path)
54 raise repo.RepoError(_("repository %s not found") % path)
49 elif create:
55 elif create:
50 raise repo.RepoError(_("repository %s already exists") % path)
56 raise repo.RepoError(_("repository %s already exists") % path)
57 else:
58 # find requirements
59 try:
60 requirements = self.opener("requires").read().splitlines()
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
63 raise
64 requirements = []
65 # check them
66 for r in requirements:
67 if r not in self.supported:
68 raise repo.RepoError(_("requirement '%s' not supported") % r)
51
69
52 # setup store
70 # setup store
53 self.spath = self.path
71 self.spath = self.path
54 self.sopener = util.opener(self.spath)
72 self.sopener = util.opener(self.spath)
55
73
56 self.ui = ui.ui(parentui=parentui)
74 self.ui = ui.ui(parentui=parentui)
57 try:
75 try:
58 self.ui.readconfig(self.join("hgrc"), self.root)
76 self.ui.readconfig(self.join("hgrc"), self.root)
59 except IOError:
77 except IOError:
60 pass
78 pass
61
79
62 v = self.ui.configrevlog()
80 v = self.ui.configrevlog()
63 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
81 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
64 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
82 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
65 fl = v.get('flags', None)
83 fl = v.get('flags', None)
66 flags = 0
84 flags = 0
67 if fl != None:
85 if fl != None:
68 for x in fl.split():
86 for x in fl.split():
69 flags |= revlog.flagstr(x)
87 flags |= revlog.flagstr(x)
70 elif self.revlogv1:
88 elif self.revlogv1:
71 flags = revlog.REVLOG_DEFAULT_FLAGS
89 flags = revlog.REVLOG_DEFAULT_FLAGS
72
90
73 v = self.revlogversion | flags
91 v = self.revlogversion | flags
74 self.manifest = manifest.manifest(self.sopener, v)
92 self.manifest = manifest.manifest(self.sopener, v)
75 self.changelog = changelog.changelog(self.sopener, v)
93 self.changelog = changelog.changelog(self.sopener, v)
76
94
77 # the changelog might not have the inline index flag
95 # the changelog might not have the inline index flag
78 # on. If the format of the changelog is the same as found in
96 # on. If the format of the changelog is the same as found in
79 # .hgrc, apply any flags found in the .hgrc as well.
97 # .hgrc, apply any flags found in the .hgrc as well.
80 # Otherwise, just version from the changelog
98 # Otherwise, just version from the changelog
81 v = self.changelog.version
99 v = self.changelog.version
82 if v == self.revlogversion:
100 if v == self.revlogversion:
83 v |= flags
101 v |= flags
84 self.revlogversion = v
102 self.revlogversion = v
85
103
86 self.tagscache = None
104 self.tagscache = None
87 self.branchcache = None
105 self.branchcache = None
88 self.nodetagscache = None
106 self.nodetagscache = None
89 self.encodepats = None
107 self.encodepats = None
90 self.decodepats = None
108 self.decodepats = None
91 self.transhandle = None
109 self.transhandle = None
92
110
93 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
111 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
94
112
95 def url(self):
113 def url(self):
96 return 'file:' + self.root
114 return 'file:' + self.root
97
115
98 def hook(self, name, throw=False, **args):
116 def hook(self, name, throw=False, **args):
99 def callhook(hname, funcname):
117 def callhook(hname, funcname):
100 '''call python hook. hook is callable object, looked up as
118 '''call python hook. hook is callable object, looked up as
101 name in python module. if callable returns "true", hook
119 name in python module. if callable returns "true", hook
102 fails, else passes. if hook raises exception, treated as
120 fails, else passes. if hook raises exception, treated as
103 hook failure. exception propagates if throw is "true".
121 hook failure. exception propagates if throw is "true".
104
122
105 reason for "true" meaning "hook failed" is so that
123 reason for "true" meaning "hook failed" is so that
106 unmodified commands (e.g. mercurial.commands.update) can
124 unmodified commands (e.g. mercurial.commands.update) can
107 be run as hooks without wrappers to convert return values.'''
125 be run as hooks without wrappers to convert return values.'''
108
126
109 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
127 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
110 d = funcname.rfind('.')
128 d = funcname.rfind('.')
111 if d == -1:
129 if d == -1:
112 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
130 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
113 % (hname, funcname))
131 % (hname, funcname))
114 modname = funcname[:d]
132 modname = funcname[:d]
115 try:
133 try:
116 obj = __import__(modname)
134 obj = __import__(modname)
117 except ImportError:
135 except ImportError:
118 try:
136 try:
119 # extensions are loaded with hgext_ prefix
137 # extensions are loaded with hgext_ prefix
120 obj = __import__("hgext_%s" % modname)
138 obj = __import__("hgext_%s" % modname)
121 except ImportError:
139 except ImportError:
122 raise util.Abort(_('%s hook is invalid '
140 raise util.Abort(_('%s hook is invalid '
123 '(import of "%s" failed)') %
141 '(import of "%s" failed)') %
124 (hname, modname))
142 (hname, modname))
125 try:
143 try:
126 for p in funcname.split('.')[1:]:
144 for p in funcname.split('.')[1:]:
127 obj = getattr(obj, p)
145 obj = getattr(obj, p)
128 except AttributeError, err:
146 except AttributeError, err:
129 raise util.Abort(_('%s hook is invalid '
147 raise util.Abort(_('%s hook is invalid '
130 '("%s" is not defined)') %
148 '("%s" is not defined)') %
131 (hname, funcname))
149 (hname, funcname))
132 if not callable(obj):
150 if not callable(obj):
133 raise util.Abort(_('%s hook is invalid '
151 raise util.Abort(_('%s hook is invalid '
134 '("%s" is not callable)') %
152 '("%s" is not callable)') %
135 (hname, funcname))
153 (hname, funcname))
136 try:
154 try:
137 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
155 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
138 except (KeyboardInterrupt, util.SignalInterrupt):
156 except (KeyboardInterrupt, util.SignalInterrupt):
139 raise
157 raise
140 except Exception, exc:
158 except Exception, exc:
141 if isinstance(exc, util.Abort):
159 if isinstance(exc, util.Abort):
142 self.ui.warn(_('error: %s hook failed: %s\n') %
160 self.ui.warn(_('error: %s hook failed: %s\n') %
143 (hname, exc.args[0]))
161 (hname, exc.args[0]))
144 else:
162 else:
145 self.ui.warn(_('error: %s hook raised an exception: '
163 self.ui.warn(_('error: %s hook raised an exception: '
146 '%s\n') % (hname, exc))
164 '%s\n') % (hname, exc))
147 if throw:
165 if throw:
148 raise
166 raise
149 self.ui.print_exc()
167 self.ui.print_exc()
150 return True
168 return True
151 if r:
169 if r:
152 if throw:
170 if throw:
153 raise util.Abort(_('%s hook failed') % hname)
171 raise util.Abort(_('%s hook failed') % hname)
154 self.ui.warn(_('warning: %s hook failed\n') % hname)
172 self.ui.warn(_('warning: %s hook failed\n') % hname)
155 return r
173 return r
156
174
157 def runhook(name, cmd):
175 def runhook(name, cmd):
158 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
176 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
159 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
177 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
160 r = util.system(cmd, environ=env, cwd=self.root)
178 r = util.system(cmd, environ=env, cwd=self.root)
161 if r:
179 if r:
162 desc, r = util.explain_exit(r)
180 desc, r = util.explain_exit(r)
163 if throw:
181 if throw:
164 raise util.Abort(_('%s hook %s') % (name, desc))
182 raise util.Abort(_('%s hook %s') % (name, desc))
165 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
183 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
166 return r
184 return r
167
185
168 r = False
186 r = False
169 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
187 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
170 if hname.split(".", 1)[0] == name and cmd]
188 if hname.split(".", 1)[0] == name and cmd]
171 hooks.sort()
189 hooks.sort()
172 for hname, cmd in hooks:
190 for hname, cmd in hooks:
173 if cmd.startswith('python:'):
191 if cmd.startswith('python:'):
174 r = callhook(hname, cmd[7:].strip()) or r
192 r = callhook(hname, cmd[7:].strip()) or r
175 else:
193 else:
176 r = runhook(hname, cmd) or r
194 r = runhook(hname, cmd) or r
177 return r
195 return r
178
196
179 tag_disallowed = ':\r\n'
197 tag_disallowed = ':\r\n'
180
198
181 def tag(self, name, node, message, local, user, date):
199 def tag(self, name, node, message, local, user, date):
182 '''tag a revision with a symbolic name.
200 '''tag a revision with a symbolic name.
183
201
184 if local is True, the tag is stored in a per-repository file.
202 if local is True, the tag is stored in a per-repository file.
185 otherwise, it is stored in the .hgtags file, and a new
203 otherwise, it is stored in the .hgtags file, and a new
186 changeset is committed with the change.
204 changeset is committed with the change.
187
205
188 keyword arguments:
206 keyword arguments:
189
207
190 local: whether to store tag in non-version-controlled file
208 local: whether to store tag in non-version-controlled file
191 (default False)
209 (default False)
192
210
193 message: commit message to use if committing
211 message: commit message to use if committing
194
212
195 user: name of user to use if committing
213 user: name of user to use if committing
196
214
197 date: date tuple to use if committing'''
215 date: date tuple to use if committing'''
198
216
199 for c in self.tag_disallowed:
217 for c in self.tag_disallowed:
200 if c in name:
218 if c in name:
201 raise util.Abort(_('%r cannot be used in a tag name') % c)
219 raise util.Abort(_('%r cannot be used in a tag name') % c)
202
220
203 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
221 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
204
222
205 if local:
223 if local:
206 # local tags are stored in the current charset
224 # local tags are stored in the current charset
207 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
225 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
208 self.hook('tag', node=hex(node), tag=name, local=local)
226 self.hook('tag', node=hex(node), tag=name, local=local)
209 return
227 return
210
228
211 for x in self.status()[:5]:
229 for x in self.status()[:5]:
212 if '.hgtags' in x:
230 if '.hgtags' in x:
213 raise util.Abort(_('working copy of .hgtags is changed '
231 raise util.Abort(_('working copy of .hgtags is changed '
214 '(please commit .hgtags manually)'))
232 '(please commit .hgtags manually)'))
215
233
216 # committed tags are stored in UTF-8
234 # committed tags are stored in UTF-8
217 line = '%s %s\n' % (hex(node), util.fromlocal(name))
235 line = '%s %s\n' % (hex(node), util.fromlocal(name))
218 self.wfile('.hgtags', 'ab').write(line)
236 self.wfile('.hgtags', 'ab').write(line)
219 if self.dirstate.state('.hgtags') == '?':
237 if self.dirstate.state('.hgtags') == '?':
220 self.add(['.hgtags'])
238 self.add(['.hgtags'])
221
239
222 self.commit(['.hgtags'], message, user, date)
240 self.commit(['.hgtags'], message, user, date)
223 self.hook('tag', node=hex(node), tag=name, local=local)
241 self.hook('tag', node=hex(node), tag=name, local=local)
224
242
225 def tags(self):
243 def tags(self):
226 '''return a mapping of tag to node'''
244 '''return a mapping of tag to node'''
227 if not self.tagscache:
245 if not self.tagscache:
228 self.tagscache = {}
246 self.tagscache = {}
229
247
230 def parsetag(line, context):
248 def parsetag(line, context):
231 if not line:
249 if not line:
232 return
250 return
233 s = l.split(" ", 1)
251 s = l.split(" ", 1)
234 if len(s) != 2:
252 if len(s) != 2:
235 self.ui.warn(_("%s: cannot parse entry\n") % context)
253 self.ui.warn(_("%s: cannot parse entry\n") % context)
236 return
254 return
237 node, key = s
255 node, key = s
238 key = util.tolocal(key.strip()) # stored in UTF-8
256 key = util.tolocal(key.strip()) # stored in UTF-8
239 try:
257 try:
240 bin_n = bin(node)
258 bin_n = bin(node)
241 except TypeError:
259 except TypeError:
242 self.ui.warn(_("%s: node '%s' is not well formed\n") %
260 self.ui.warn(_("%s: node '%s' is not well formed\n") %
243 (context, node))
261 (context, node))
244 return
262 return
245 if bin_n not in self.changelog.nodemap:
263 if bin_n not in self.changelog.nodemap:
246 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
264 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
247 (context, key))
265 (context, key))
248 return
266 return
249 self.tagscache[key] = bin_n
267 self.tagscache[key] = bin_n
250
268
251 # read the tags file from each head, ending with the tip,
269 # read the tags file from each head, ending with the tip,
252 # and add each tag found to the map, with "newer" ones
270 # and add each tag found to the map, with "newer" ones
253 # taking precedence
271 # taking precedence
254 f = None
272 f = None
255 for rev, node, fnode in self._hgtagsnodes():
273 for rev, node, fnode in self._hgtagsnodes():
256 f = (f and f.filectx(fnode) or
274 f = (f and f.filectx(fnode) or
257 self.filectx('.hgtags', fileid=fnode))
275 self.filectx('.hgtags', fileid=fnode))
258 count = 0
276 count = 0
259 for l in f.data().splitlines():
277 for l in f.data().splitlines():
260 count += 1
278 count += 1
261 parsetag(l, _("%s, line %d") % (str(f), count))
279 parsetag(l, _("%s, line %d") % (str(f), count))
262
280
263 try:
281 try:
264 f = self.opener("localtags")
282 f = self.opener("localtags")
265 count = 0
283 count = 0
266 for l in f:
284 for l in f:
267 # localtags are stored in the local character set
285 # localtags are stored in the local character set
268 # while the internal tag table is stored in UTF-8
286 # while the internal tag table is stored in UTF-8
269 l = util.fromlocal(l)
287 l = util.fromlocal(l)
270 count += 1
288 count += 1
271 parsetag(l, _("localtags, line %d") % count)
289 parsetag(l, _("localtags, line %d") % count)
272 except IOError:
290 except IOError:
273 pass
291 pass
274
292
275 self.tagscache['tip'] = self.changelog.tip()
293 self.tagscache['tip'] = self.changelog.tip()
276
294
277 return self.tagscache
295 return self.tagscache
278
296
279 def _hgtagsnodes(self):
297 def _hgtagsnodes(self):
280 heads = self.heads()
298 heads = self.heads()
281 heads.reverse()
299 heads.reverse()
282 last = {}
300 last = {}
283 ret = []
301 ret = []
284 for node in heads:
302 for node in heads:
285 c = self.changectx(node)
303 c = self.changectx(node)
286 rev = c.rev()
304 rev = c.rev()
287 try:
305 try:
288 fnode = c.filenode('.hgtags')
306 fnode = c.filenode('.hgtags')
289 except repo.LookupError:
307 except repo.LookupError:
290 continue
308 continue
291 ret.append((rev, node, fnode))
309 ret.append((rev, node, fnode))
292 if fnode in last:
310 if fnode in last:
293 ret[last[fnode]] = None
311 ret[last[fnode]] = None
294 last[fnode] = len(ret) - 1
312 last[fnode] = len(ret) - 1
295 return [item for item in ret if item]
313 return [item for item in ret if item]
296
314
297 def tagslist(self):
315 def tagslist(self):
298 '''return a list of tags ordered by revision'''
316 '''return a list of tags ordered by revision'''
299 l = []
317 l = []
300 for t, n in self.tags().items():
318 for t, n in self.tags().items():
301 try:
319 try:
302 r = self.changelog.rev(n)
320 r = self.changelog.rev(n)
303 except:
321 except:
304 r = -2 # sort to the beginning of the list if unknown
322 r = -2 # sort to the beginning of the list if unknown
305 l.append((r, t, n))
323 l.append((r, t, n))
306 l.sort()
324 l.sort()
307 return [(t, n) for r, t, n in l]
325 return [(t, n) for r, t, n in l]
308
326
309 def nodetags(self, node):
327 def nodetags(self, node):
310 '''return the tags associated with a node'''
328 '''return the tags associated with a node'''
311 if not self.nodetagscache:
329 if not self.nodetagscache:
312 self.nodetagscache = {}
330 self.nodetagscache = {}
313 for t, n in self.tags().items():
331 for t, n in self.tags().items():
314 self.nodetagscache.setdefault(n, []).append(t)
332 self.nodetagscache.setdefault(n, []).append(t)
315 return self.nodetagscache.get(node, [])
333 return self.nodetagscache.get(node, [])
316
334
317 def branchtags(self):
335 def branchtags(self):
318 if self.branchcache != None:
336 if self.branchcache != None:
319 return self.branchcache
337 return self.branchcache
320
338
321 self.branchcache = {} # avoid recursion in changectx
339 self.branchcache = {} # avoid recursion in changectx
322
340
323 partial, last, lrev = self._readbranchcache()
341 partial, last, lrev = self._readbranchcache()
324
342
325 tiprev = self.changelog.count() - 1
343 tiprev = self.changelog.count() - 1
326 if lrev != tiprev:
344 if lrev != tiprev:
327 self._updatebranchcache(partial, lrev+1, tiprev+1)
345 self._updatebranchcache(partial, lrev+1, tiprev+1)
328 self._writebranchcache(partial, self.changelog.tip(), tiprev)
346 self._writebranchcache(partial, self.changelog.tip(), tiprev)
329
347
330 # the branch cache is stored on disk as UTF-8, but in the local
348 # the branch cache is stored on disk as UTF-8, but in the local
331 # charset internally
349 # charset internally
332 for k, v in partial.items():
350 for k, v in partial.items():
333 self.branchcache[util.tolocal(k)] = v
351 self.branchcache[util.tolocal(k)] = v
334 return self.branchcache
352 return self.branchcache
335
353
336 def _readbranchcache(self):
354 def _readbranchcache(self):
337 partial = {}
355 partial = {}
338 try:
356 try:
339 f = self.opener("branches.cache")
357 f = self.opener("branches.cache")
340 lines = f.read().split('\n')
358 lines = f.read().split('\n')
341 f.close()
359 f.close()
342 last, lrev = lines.pop(0).rstrip().split(" ", 1)
360 last, lrev = lines.pop(0).rstrip().split(" ", 1)
343 last, lrev = bin(last), int(lrev)
361 last, lrev = bin(last), int(lrev)
344 if not (lrev < self.changelog.count() and
362 if not (lrev < self.changelog.count() and
345 self.changelog.node(lrev) == last): # sanity check
363 self.changelog.node(lrev) == last): # sanity check
346 # invalidate the cache
364 # invalidate the cache
347 raise ValueError('Invalid branch cache: unknown tip')
365 raise ValueError('Invalid branch cache: unknown tip')
348 for l in lines:
366 for l in lines:
349 if not l: continue
367 if not l: continue
350 node, label = l.rstrip().split(" ", 1)
368 node, label = l.rstrip().split(" ", 1)
351 partial[label] = bin(node)
369 partial[label] = bin(node)
352 except (KeyboardInterrupt, util.SignalInterrupt):
370 except (KeyboardInterrupt, util.SignalInterrupt):
353 raise
371 raise
354 except Exception, inst:
372 except Exception, inst:
355 if self.ui.debugflag:
373 if self.ui.debugflag:
356 self.ui.warn(str(inst), '\n')
374 self.ui.warn(str(inst), '\n')
357 partial, last, lrev = {}, nullid, nullrev
375 partial, last, lrev = {}, nullid, nullrev
358 return partial, last, lrev
376 return partial, last, lrev
359
377
360 def _writebranchcache(self, branches, tip, tiprev):
378 def _writebranchcache(self, branches, tip, tiprev):
361 try:
379 try:
362 f = self.opener("branches.cache", "w")
380 f = self.opener("branches.cache", "w")
363 f.write("%s %s\n" % (hex(tip), tiprev))
381 f.write("%s %s\n" % (hex(tip), tiprev))
364 for label, node in branches.iteritems():
382 for label, node in branches.iteritems():
365 f.write("%s %s\n" % (hex(node), label))
383 f.write("%s %s\n" % (hex(node), label))
366 except IOError:
384 except IOError:
367 pass
385 pass
368
386
369 def _updatebranchcache(self, partial, start, end):
387 def _updatebranchcache(self, partial, start, end):
370 for r in xrange(start, end):
388 for r in xrange(start, end):
371 c = self.changectx(r)
389 c = self.changectx(r)
372 b = c.branch()
390 b = c.branch()
373 if b:
391 if b:
374 partial[b] = c.node()
392 partial[b] = c.node()
375
393
376 def lookup(self, key):
394 def lookup(self, key):
377 if key == '.':
395 if key == '.':
378 key = self.dirstate.parents()[0]
396 key = self.dirstate.parents()[0]
379 if key == nullid:
397 if key == nullid:
380 raise repo.RepoError(_("no revision checked out"))
398 raise repo.RepoError(_("no revision checked out"))
381 elif key == 'null':
399 elif key == 'null':
382 return nullid
400 return nullid
383 n = self.changelog._match(key)
401 n = self.changelog._match(key)
384 if n:
402 if n:
385 return n
403 return n
386 if key in self.tags():
404 if key in self.tags():
387 return self.tags()[key]
405 return self.tags()[key]
388 if key in self.branchtags():
406 if key in self.branchtags():
389 return self.branchtags()[key]
407 return self.branchtags()[key]
390 n = self.changelog._partialmatch(key)
408 n = self.changelog._partialmatch(key)
391 if n:
409 if n:
392 return n
410 return n
393 raise repo.RepoError(_("unknown revision '%s'") % key)
411 raise repo.RepoError(_("unknown revision '%s'") % key)
394
412
395 def dev(self):
413 def dev(self):
396 return os.lstat(self.path).st_dev
414 return os.lstat(self.path).st_dev
397
415
398 def local(self):
416 def local(self):
399 return True
417 return True
400
418
401 def join(self, f):
419 def join(self, f):
402 return os.path.join(self.path, f)
420 return os.path.join(self.path, f)
403
421
404 def sjoin(self, f):
422 def sjoin(self, f):
405 return os.path.join(self.spath, f)
423 return os.path.join(self.spath, f)
406
424
407 def wjoin(self, f):
425 def wjoin(self, f):
408 return os.path.join(self.root, f)
426 return os.path.join(self.root, f)
409
427
410 def file(self, f):
428 def file(self, f):
411 if f[0] == '/':
429 if f[0] == '/':
412 f = f[1:]
430 f = f[1:]
413 return filelog.filelog(self.sopener, f, self.revlogversion)
431 return filelog.filelog(self.sopener, f, self.revlogversion)
414
432
415 def changectx(self, changeid=None):
433 def changectx(self, changeid=None):
416 return context.changectx(self, changeid)
434 return context.changectx(self, changeid)
417
435
418 def workingctx(self):
436 def workingctx(self):
419 return context.workingctx(self)
437 return context.workingctx(self)
420
438
421 def parents(self, changeid=None):
439 def parents(self, changeid=None):
422 '''
440 '''
423 get list of changectxs for parents of changeid or working directory
441 get list of changectxs for parents of changeid or working directory
424 '''
442 '''
425 if changeid is None:
443 if changeid is None:
426 pl = self.dirstate.parents()
444 pl = self.dirstate.parents()
427 else:
445 else:
428 n = self.changelog.lookup(changeid)
446 n = self.changelog.lookup(changeid)
429 pl = self.changelog.parents(n)
447 pl = self.changelog.parents(n)
430 if pl[1] == nullid:
448 if pl[1] == nullid:
431 return [self.changectx(pl[0])]
449 return [self.changectx(pl[0])]
432 return [self.changectx(pl[0]), self.changectx(pl[1])]
450 return [self.changectx(pl[0]), self.changectx(pl[1])]
433
451
434 def filectx(self, path, changeid=None, fileid=None):
452 def filectx(self, path, changeid=None, fileid=None):
435 """changeid can be a changeset revision, node, or tag.
453 """changeid can be a changeset revision, node, or tag.
436 fileid can be a file revision or node."""
454 fileid can be a file revision or node."""
437 return context.filectx(self, path, changeid, fileid)
455 return context.filectx(self, path, changeid, fileid)
438
456
439 def getcwd(self):
457 def getcwd(self):
440 return self.dirstate.getcwd()
458 return self.dirstate.getcwd()
441
459
442 def wfile(self, f, mode='r'):
460 def wfile(self, f, mode='r'):
443 return self.wopener(f, mode)
461 return self.wopener(f, mode)
444
462
445 def wread(self, filename):
463 def wread(self, filename):
446 if self.encodepats == None:
464 if self.encodepats == None:
447 l = []
465 l = []
448 for pat, cmd in self.ui.configitems("encode"):
466 for pat, cmd in self.ui.configitems("encode"):
449 mf = util.matcher(self.root, "", [pat], [], [])[1]
467 mf = util.matcher(self.root, "", [pat], [], [])[1]
450 l.append((mf, cmd))
468 l.append((mf, cmd))
451 self.encodepats = l
469 self.encodepats = l
452
470
453 data = self.wopener(filename, 'r').read()
471 data = self.wopener(filename, 'r').read()
454
472
455 for mf, cmd in self.encodepats:
473 for mf, cmd in self.encodepats:
456 if mf(filename):
474 if mf(filename):
457 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
475 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
458 data = util.filter(data, cmd)
476 data = util.filter(data, cmd)
459 break
477 break
460
478
461 return data
479 return data
462
480
463 def wwrite(self, filename, data, fd=None):
481 def wwrite(self, filename, data, fd=None):
464 if self.decodepats == None:
482 if self.decodepats == None:
465 l = []
483 l = []
466 for pat, cmd in self.ui.configitems("decode"):
484 for pat, cmd in self.ui.configitems("decode"):
467 mf = util.matcher(self.root, "", [pat], [], [])[1]
485 mf = util.matcher(self.root, "", [pat], [], [])[1]
468 l.append((mf, cmd))
486 l.append((mf, cmd))
469 self.decodepats = l
487 self.decodepats = l
470
488
471 for mf, cmd in self.decodepats:
489 for mf, cmd in self.decodepats:
472 if mf(filename):
490 if mf(filename):
473 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
491 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
474 data = util.filter(data, cmd)
492 data = util.filter(data, cmd)
475 break
493 break
476
494
477 if fd:
495 if fd:
478 return fd.write(data)
496 return fd.write(data)
479 return self.wopener(filename, 'w').write(data)
497 return self.wopener(filename, 'w').write(data)
480
498
481 def transaction(self):
499 def transaction(self):
482 tr = self.transhandle
500 tr = self.transhandle
483 if tr != None and tr.running():
501 if tr != None and tr.running():
484 return tr.nest()
502 return tr.nest()
485
503
486 # save dirstate for rollback
504 # save dirstate for rollback
487 try:
505 try:
488 ds = self.opener("dirstate").read()
506 ds = self.opener("dirstate").read()
489 except IOError:
507 except IOError:
490 ds = ""
508 ds = ""
491 self.opener("journal.dirstate", "w").write(ds)
509 self.opener("journal.dirstate", "w").write(ds)
492
510
493 renames = [(self.sjoin("journal"), self.sjoin("undo")),
511 renames = [(self.sjoin("journal"), self.sjoin("undo")),
494 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
512 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
495 tr = transaction.transaction(self.ui.warn, self.sopener,
513 tr = transaction.transaction(self.ui.warn, self.sopener,
496 self.sjoin("journal"),
514 self.sjoin("journal"),
497 aftertrans(renames))
515 aftertrans(renames))
498 self.transhandle = tr
516 self.transhandle = tr
499 return tr
517 return tr
500
518
501 def recover(self):
519 def recover(self):
502 l = self.lock()
520 l = self.lock()
503 if os.path.exists(self.sjoin("journal")):
521 if os.path.exists(self.sjoin("journal")):
504 self.ui.status(_("rolling back interrupted transaction\n"))
522 self.ui.status(_("rolling back interrupted transaction\n"))
505 transaction.rollback(self.sopener, self.sjoin("journal"))
523 transaction.rollback(self.sopener, self.sjoin("journal"))
506 self.reload()
524 self.reload()
507 return True
525 return True
508 else:
526 else:
509 self.ui.warn(_("no interrupted transaction available\n"))
527 self.ui.warn(_("no interrupted transaction available\n"))
510 return False
528 return False
511
529
512 def rollback(self, wlock=None):
530 def rollback(self, wlock=None):
513 if not wlock:
531 if not wlock:
514 wlock = self.wlock()
532 wlock = self.wlock()
515 l = self.lock()
533 l = self.lock()
516 if os.path.exists(self.sjoin("undo")):
534 if os.path.exists(self.sjoin("undo")):
517 self.ui.status(_("rolling back last transaction\n"))
535 self.ui.status(_("rolling back last transaction\n"))
518 transaction.rollback(self.sopener, self.sjoin("undo"))
536 transaction.rollback(self.sopener, self.sjoin("undo"))
519 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
537 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
520 self.reload()
538 self.reload()
521 self.wreload()
539 self.wreload()
522 else:
540 else:
523 self.ui.warn(_("no rollback information available\n"))
541 self.ui.warn(_("no rollback information available\n"))
524
542
525 def wreload(self):
543 def wreload(self):
526 self.dirstate.read()
544 self.dirstate.read()
527
545
528 def reload(self):
546 def reload(self):
529 self.changelog.load()
547 self.changelog.load()
530 self.manifest.load()
548 self.manifest.load()
531 self.tagscache = None
549 self.tagscache = None
532 self.nodetagscache = None
550 self.nodetagscache = None
533
551
534 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
552 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
535 desc=None):
553 desc=None):
536 try:
554 try:
537 l = lock.lock(lockname, 0, releasefn, desc=desc)
555 l = lock.lock(lockname, 0, releasefn, desc=desc)
538 except lock.LockHeld, inst:
556 except lock.LockHeld, inst:
539 if not wait:
557 if not wait:
540 raise
558 raise
541 self.ui.warn(_("waiting for lock on %s held by %r\n") %
559 self.ui.warn(_("waiting for lock on %s held by %r\n") %
542 (desc, inst.locker))
560 (desc, inst.locker))
543 # default to 600 seconds timeout
561 # default to 600 seconds timeout
544 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
562 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
545 releasefn, desc=desc)
563 releasefn, desc=desc)
546 if acquirefn:
564 if acquirefn:
547 acquirefn()
565 acquirefn()
548 return l
566 return l
549
567
550 def lock(self, wait=1):
568 def lock(self, wait=1):
551 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
569 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
552 desc=_('repository %s') % self.origroot)
570 desc=_('repository %s') % self.origroot)
553
571
554 def wlock(self, wait=1):
572 def wlock(self, wait=1):
555 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
573 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
556 self.wreload,
574 self.wreload,
557 desc=_('working directory of %s') % self.origroot)
575 desc=_('working directory of %s') % self.origroot)
558
576
559 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
577 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
560 """
578 """
561 commit an individual file as part of a larger transaction
579 commit an individual file as part of a larger transaction
562 """
580 """
563
581
564 t = self.wread(fn)
582 t = self.wread(fn)
565 fl = self.file(fn)
583 fl = self.file(fn)
566 fp1 = manifest1.get(fn, nullid)
584 fp1 = manifest1.get(fn, nullid)
567 fp2 = manifest2.get(fn, nullid)
585 fp2 = manifest2.get(fn, nullid)
568
586
569 meta = {}
587 meta = {}
570 cp = self.dirstate.copied(fn)
588 cp = self.dirstate.copied(fn)
571 if cp:
589 if cp:
572 meta["copy"] = cp
590 meta["copy"] = cp
573 if not manifest2: # not a branch merge
591 if not manifest2: # not a branch merge
574 meta["copyrev"] = hex(manifest1.get(cp, nullid))
592 meta["copyrev"] = hex(manifest1.get(cp, nullid))
575 fp2 = nullid
593 fp2 = nullid
576 elif fp2 != nullid: # copied on remote side
594 elif fp2 != nullid: # copied on remote side
577 meta["copyrev"] = hex(manifest1.get(cp, nullid))
595 meta["copyrev"] = hex(manifest1.get(cp, nullid))
578 elif fp1 != nullid: # copied on local side, reversed
596 elif fp1 != nullid: # copied on local side, reversed
579 meta["copyrev"] = hex(manifest2.get(cp))
597 meta["copyrev"] = hex(manifest2.get(cp))
580 fp2 = nullid
598 fp2 = nullid
581 else: # directory rename
599 else: # directory rename
582 meta["copyrev"] = hex(manifest1.get(cp, nullid))
600 meta["copyrev"] = hex(manifest1.get(cp, nullid))
583 self.ui.debug(_(" %s: copy %s:%s\n") %
601 self.ui.debug(_(" %s: copy %s:%s\n") %
584 (fn, cp, meta["copyrev"]))
602 (fn, cp, meta["copyrev"]))
585 fp1 = nullid
603 fp1 = nullid
586 elif fp2 != nullid:
604 elif fp2 != nullid:
587 # is one parent an ancestor of the other?
605 # is one parent an ancestor of the other?
588 fpa = fl.ancestor(fp1, fp2)
606 fpa = fl.ancestor(fp1, fp2)
589 if fpa == fp1:
607 if fpa == fp1:
590 fp1, fp2 = fp2, nullid
608 fp1, fp2 = fp2, nullid
591 elif fpa == fp2:
609 elif fpa == fp2:
592 fp2 = nullid
610 fp2 = nullid
593
611
594 # is the file unmodified from the parent? report existing entry
612 # is the file unmodified from the parent? report existing entry
595 if fp2 == nullid and not fl.cmp(fp1, t):
613 if fp2 == nullid and not fl.cmp(fp1, t):
596 return fp1
614 return fp1
597
615
598 changelist.append(fn)
616 changelist.append(fn)
599 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
617 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
600
618
601 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
619 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
602 if p1 is None:
620 if p1 is None:
603 p1, p2 = self.dirstate.parents()
621 p1, p2 = self.dirstate.parents()
604 return self.commit(files=files, text=text, user=user, date=date,
622 return self.commit(files=files, text=text, user=user, date=date,
605 p1=p1, p2=p2, wlock=wlock)
623 p1=p1, p2=p2, wlock=wlock)
606
624
607 def commit(self, files=None, text="", user=None, date=None,
625 def commit(self, files=None, text="", user=None, date=None,
608 match=util.always, force=False, lock=None, wlock=None,
626 match=util.always, force=False, lock=None, wlock=None,
609 force_editor=False, p1=None, p2=None, extra={}):
627 force_editor=False, p1=None, p2=None, extra={}):
610
628
611 commit = []
629 commit = []
612 remove = []
630 remove = []
613 changed = []
631 changed = []
614 use_dirstate = (p1 is None) # not rawcommit
632 use_dirstate = (p1 is None) # not rawcommit
615 extra = extra.copy()
633 extra = extra.copy()
616
634
617 if use_dirstate:
635 if use_dirstate:
618 if files:
636 if files:
619 for f in files:
637 for f in files:
620 s = self.dirstate.state(f)
638 s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = util.fromlocal(self.workingctx().branch())
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

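    # Editor's note (illustration, not part of the original source): commit
    # writes the three revlog layers in dependency order, all inside one
    # transaction.  Roughly:
    #
    #   for f in commit:                      # 1. file revlogs
    #       new[f] = self.filecommit(...)
    #   mn = self.manifest.add(...)           # 2. manifest revlog
    #   n = self.changelog.add(mn, ...)       # 3. changelog revlog
    #   tr.close()
    #
    # The 'pretxncommit' hook fires after the changelog entry is written but
    # before tr.close(), so a failing hook can still roll the whole
    # changeset back.
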
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

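    # Illustrative usage (editor's sketch, not in the original source):
    #
    #   for src, fn in repo.walk(files=['foo'], match=util.always):
    #       if src == 'f':
    #           print fn            # found in the working directory
    #
    # With node=None the work is delegated to dirstate.walk(), so 'f'
    # results come from the filesystem; with a node given, only 'm'
    # (manifest) and 'b' (badmatch) results are possible.
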
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

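    # Editor's sketch (not in the original source): callers unpack the
    # 7-tuple positionally, e.g.
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(list_clean=True)
    #
    # ignored and clean are only fully populated when list_ignored and
    # list_clean are passed, which keeps the common "what changed?" query
    # cheap.
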
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

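    # Editor's note (illustration, not in the original source): the
    # (-rev, node) decorate-sort-undecorate above yields newest-first
    # ordering without a custom comparator, e.g.
    #
    #   >>> heads = [(-5, 'e'), (-2, 'b'), (-9, 'i')]
    #   >>> heads.sort()
    #   >>> [n for (r, n) in heads]
    #   ['i', 'e', 'b']
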
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # Callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a specific
    # branch).
    #
    # Passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

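    # Editor's sketch (not in the original source): for the graph in the
    # comment above, with tag 2.6.12 on a and 2.6.13 on d, calling
    # branchlookup(heads=[e]) would return roughly
    #
    #   {e: ['2.6.13']}
    #
    # because the 2.6.12 node is visible from the 2.6.13 node and is
    # therefore pruned by the visible() pass.
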
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

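    # Editor's illustration (not in the original source): modelling a linear
    # branch as integers shows the power-of-two sampling that between()
    # performs per (top, bottom) pair:
    #
    #   >>> top, bottom, l, i, f = 20, 0, [], 0, 1
    #   >>> n = top
    #   >>> while n != bottom:
    #   ...     if i == f:
    #   ...         l.append(n)
    #   ...         f = f * 2
    #   ...     n = n - 1          # step to the first parent
    #   ...     i = i + 1
    #   >>> l
    #   [19, 18, 16, 12, 4]
    #
    # i.e. the nodes at distance 1, 2, 4, 8 and 16 from top, which is what
    # lets findincoming() binary-search a long branch in O(log n) rounds.
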
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

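    # Editor's summary (not in the original source): the discovery above is
    # a three-phase wire protocol, roughly
    #
    #   1. remote.heads()    - cheap test for "already in sync" / full pull
    #   2. remote.branches() - walk linear segments, batched ten at a time
    #   3. remote.between()  - binary search inside partially-known segments
    #
    # so the number of round trips grows with the topological complexity of
    # the missing changesets rather than with the size of either repository.
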
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

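    # Editor's sketch (not in the original source): expressed with sets, the
    # pruning above computes approximately
    #
    #   remain = all_local_nodes - ancestors(base)
    #   subset = [n for n in remain if neither parent of n is in remain]
    #
    # i.e. the roots of the subgraph that the remote side is missing.
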
    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

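    # Editor's note (illustration, not in the original source): pull() only
    # creates and releases the repository lock when the caller did not pass
    # one in, so it can run standalone or as one step of a larger locked
    # operation, e.g.
    #
    #   lock = repo.lock()
    #   try:
    #       repo.pull(other, lock=lock)   # reuses the caller's lock
    #   finally:
    #       lock.release()
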
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

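    # Editor's sketch (not in the original source): the warning logic above
    # boils down to a head count.  Starting from the local heads, every
    # remote head that no outgoing head descends from is added back, and if
    #
    #   len(newheads) > len(remote_heads)
    #
    # the push would leave the remote with a head it does not already have,
    # so the user is told to merge first (or to use push -f).
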
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)
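
        # Editor's illustration (not in the original source): given a chain
        # n1 -> n2 -> n3 where n3 is already in hasset (the recipient has
        # it), prune_parents() marks n2 and n1 as present too and drops all
        # three from msngset - if the recipient has a node, revlog integrity
        # means it must also have every ancestor of that node.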

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and a total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes
1588
1606
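# --- editor's aside: illustrative sketch, not part of the changeset ---
# The "function generating function" above is a closure over a one-element
# list (next_rev = [0]); Python 2 has no 'nonlocal', so a mutable cell is
# how the inner callback keeps state between calls. The same trick in
# isolation (hypothetical names):

def make_sequential_checker():
    next_rev = [0]                    # mutable cell shared with the closure
    def check(rev):
        ok = (rev == next_rev[0])
        next_rev[0] = rev + 1         # remember the revision we hope to see next
        return ok
    return check

check = make_sequential_checker()
assert check(0) and check(1) and not check(5)
# --- end aside ---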
1589 # We have a list of filenodes we think we need for a file, let's remove
1607 # We have a list of filenodes we think we need for a file, let's remove
1590 # all those we know the recipient must have.
1608 # all those we know the recipient must have.
1591 def prune_filenodes(f, filerevlog):
1609 def prune_filenodes(f, filerevlog):
1592 msngset = msng_filenode_set[f]
1610 msngset = msng_filenode_set[f]
1593 hasset = {}
1611 hasset = {}
1594 # If a 'missing' filenode thinks it belongs to a changenode we
1612 # If a 'missing' filenode thinks it belongs to a changenode we
1595 # assume the recipient must have, then the recipient must have
1613 # assume the recipient must have, then the recipient must have
1596 # that filenode.
1614 # that filenode.
1597 for n in msngset:
1615 for n in msngset:
1598 clnode = cl.node(filerevlog.linkrev(n))
1616 clnode = cl.node(filerevlog.linkrev(n))
1599 if clnode in has_cl_set:
1617 if clnode in has_cl_set:
1600 hasset[n] = 1
1618 hasset[n] = 1
1601 prune_parents(filerevlog, hasset, msngset)
1619 prune_parents(filerevlog, hasset, msngset)
1602
1620
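# --- editor's aside: illustrative sketch, not part of the changeset ---
# The pruning above rests on one invariant: if the recipient has a node, it
# also has every ancestor of that node. prune_parents does that walk over
# the revlog's parent pointers; with a plain parent map (hypothetical
# names), the same idea looks like:

def prune(parents, hasset, msngset):
    stack = list(hasset)
    while stack:                      # walk ancestors of everything known
        n = stack.pop()
        for p in parents.get(n, ()):
            if p not in hasset:
                hasset.add(p)
                stack.append(p)
    for n in hasset:
        msngset.pop(n, None)          # known nodes are not missing after all

parents = {'c': ['b'], 'b': ['a'], 'a': []}
msngset = {'a': 1, 'b': 1, 'c': 1}
prune(parents, set(['b']), msngset)   # recipient is known to have 'b'
assert list(msngset) == ['c']
# --- end aside ---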
1603 # A function generating function that sets up a context for the
1621 # A function generating function that sets up a context for the
1604 # inner function.
1622 # inner function.
1605 def lookup_filenode_link_func(fname):
1623 def lookup_filenode_link_func(fname):
1606 msngset = msng_filenode_set[fname]
1624 msngset = msng_filenode_set[fname]
1607 # Lookup the changenode the filenode belongs to.
1625 # Lookup the changenode the filenode belongs to.
1608 def lookup_filenode_link(fnode):
1626 def lookup_filenode_link(fnode):
1609 return msngset[fnode]
1627 return msngset[fnode]
1610 return lookup_filenode_link
1628 return lookup_filenode_link
1611
1629
1612 # Now that we have all these utility functions to help out and
1630 # Now that we have all these utility functions to help out and
1613 # logically divide up the task, generate the group.
1631 # logically divide up the task, generate the group.
1614 def gengroup():
1632 def gengroup():
1615 # The set of changed files starts empty.
1633 # The set of changed files starts empty.
1616 changedfiles = {}
1634 changedfiles = {}
1617 # Create a changenode group generator that will call our functions
1635 # Create a changenode group generator that will call our functions
1618 # back to lookup the owning changenode and collect information.
1636 # back to lookup the owning changenode and collect information.
1619 group = cl.group(msng_cl_lst, identity,
1637 group = cl.group(msng_cl_lst, identity,
1620 manifest_and_file_collector(changedfiles))
1638 manifest_and_file_collector(changedfiles))
1621 for chnk in group:
1639 for chnk in group:
1622 yield chnk
1640 yield chnk
1623
1641
1624 # The list of manifests has been collected by the generator
1642 # The list of manifests has been collected by the generator
1625 # calling our functions back.
1643 # calling our functions back.
1626 prune_manifests()
1644 prune_manifests()
1627 msng_mnfst_lst = msng_mnfst_set.keys()
1645 msng_mnfst_lst = msng_mnfst_set.keys()
1628 # Sort the manifestnodes by revision number.
1646 # Sort the manifestnodes by revision number.
1629 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1647 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1630 # Create a generator for the manifestnodes that calls our lookup
1648 # Create a generator for the manifestnodes that calls our lookup
1631 # and data collection functions back.
1649 # and data collection functions back.
1632 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1650 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1633 filenode_collector(changedfiles))
1651 filenode_collector(changedfiles))
1634 for chnk in group:
1652 for chnk in group:
1635 yield chnk
1653 yield chnk
1636
1654
1637 # These are no longer needed, dereference and toss the memory for
1655 # These are no longer needed, dereference and toss the memory for
1638 # them.
1656 # them.
1639 msng_mnfst_lst = None
1657 msng_mnfst_lst = None
1640 msng_mnfst_set.clear()
1658 msng_mnfst_set.clear()
1641
1659
1642 changedfiles = changedfiles.keys()
1660 changedfiles = changedfiles.keys()
1643 changedfiles.sort()
1661 changedfiles.sort()
1644 # Go through all our files in order sorted by name.
1662 # Go through all our files in order sorted by name.
1645 for fname in changedfiles:
1663 for fname in changedfiles:
1646 filerevlog = self.file(fname)
1664 filerevlog = self.file(fname)
1647 # Toss out the filenodes that the recipient isn't really
1665 # Toss out the filenodes that the recipient isn't really
1648 # missing.
1666 # missing.
1649 if msng_filenode_set.has_key(fname):
1667 if msng_filenode_set.has_key(fname):
1650 prune_filenodes(fname, filerevlog)
1668 prune_filenodes(fname, filerevlog)
1651 msng_filenode_lst = msng_filenode_set[fname].keys()
1669 msng_filenode_lst = msng_filenode_set[fname].keys()
1652 else:
1670 else:
1653 msng_filenode_lst = []
1671 msng_filenode_lst = []
1654 # If any filenodes are left, generate the group for them,
1672 # If any filenodes are left, generate the group for them,
1655 # otherwise don't bother.
1673 # otherwise don't bother.
1656 if len(msng_filenode_lst) > 0:
1674 if len(msng_filenode_lst) > 0:
1657 yield changegroup.genchunk(fname)
1675 yield changegroup.genchunk(fname)
1658 # Sort the filenodes by their revision number.
1676 # Sort the filenodes by their revision number.
1659 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1677 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1660 # Create a group generator and only pass in a changenode
1678 # Create a group generator and only pass in a changenode
1661 # lookup function, as we don't need to collect any information
1679 # lookup function, as we don't need to collect any information
1662 # from filenodes.
1680 # from filenodes.
1663 group = filerevlog.group(msng_filenode_lst,
1681 group = filerevlog.group(msng_filenode_lst,
1664 lookup_filenode_link_func(fname))
1682 lookup_filenode_link_func(fname))
1665 for chnk in group:
1683 for chnk in group:
1666 yield chnk
1684 yield chnk
1667 if msng_filenode_set.has_key(fname):
1685 if msng_filenode_set.has_key(fname):
1668 # Don't need this anymore, toss it to free memory.
1686 # Don't need this anymore, toss it to free memory.
1669 del msng_filenode_set[fname]
1687 del msng_filenode_set[fname]
1670 # Signal that no more groups are left.
1688 # Signal that no more groups are left.
1671 yield changegroup.closechunk()
1689 yield changegroup.closechunk()
1672
1690
1673 if msng_cl_lst:
1691 if msng_cl_lst:
1674 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1692 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1675
1693
1676 return util.chunkbuffer(gengroup())
1694 return util.chunkbuffer(gengroup())
1677
1695
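# --- editor's aside: illustrative sketch, not part of the changeset ---
# The chunks yielded above use changegroup's framing: a 4-byte big-endian
# length that counts itself, then the payload; a zero length marks the end
# of a group. A minimal stand-in for genchunk/closechunk:

import struct

def genchunk(data):
    return struct.pack(">l", len(data) + 4) + data

def closechunk():
    return struct.pack(">l", 0)

assert genchunk("ab") == "\x00\x00\x00\x06ab"
assert closechunk() == "\x00\x00\x00\x00"
# --- end aside ---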
1678 def changegroup(self, basenodes, source):
1696 def changegroup(self, basenodes, source):
1679 """Generate a changegroup of all nodes that we have that a recipient
1697 """Generate a changegroup of all nodes that we have that a recipient
1680 doesn't.
1698 doesn't.
1681
1699
1682 This is much easier than the previous function as we can assume that
1700 This is much easier than the previous function as we can assume that
1683 the recipient has any changenode we aren't sending them."""
1701 the recipient has any changenode we aren't sending them."""
1684
1702
1685 self.hook('preoutgoing', throw=True, source=source)
1703 self.hook('preoutgoing', throw=True, source=source)
1686
1704
1687 cl = self.changelog
1705 cl = self.changelog
1688 nodes = cl.nodesbetween(basenodes, None)[0]
1706 nodes = cl.nodesbetween(basenodes, None)[0]
1689 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1707 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1690 self.changegroupinfo(nodes)
1708 self.changegroupinfo(nodes)
1691
1709
1692 def identity(x):
1710 def identity(x):
1693 return x
1711 return x
1694
1712
1695 def gennodelst(revlog):
1713 def gennodelst(revlog):
1696 for r in xrange(0, revlog.count()):
1714 for r in xrange(0, revlog.count()):
1697 n = revlog.node(r)
1715 n = revlog.node(r)
1698 if revlog.linkrev(n) in revset:
1716 if revlog.linkrev(n) in revset:
1699 yield n
1717 yield n
1700
1718
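# --- editor's aside: illustrative sketch, not part of the changeset ---
# gennodelst keeps a node only when its linkrev -- the changelog revision
# that introduced it -- falls inside the outgoing revset. With plain dicts
# in place of revlogs (hypothetical names):

def outgoing(nodes, linkrev, revset):
    return [n for n in nodes if linkrev[n] in revset]

assert outgoing(['a', 'b'], {'a': 0, 'b': 5}, {0: None}) == ['a']
# --- end aside ---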
1701 def changed_file_collector(changedfileset):
1719 def changed_file_collector(changedfileset):
1702 def collect_changed_files(clnode):
1720 def collect_changed_files(clnode):
1703 c = cl.read(clnode)
1721 c = cl.read(clnode)
1704 for fname in c[3]:
1722 for fname in c[3]:
1705 changedfileset[fname] = 1
1723 changedfileset[fname] = 1
1706 return collect_changed_files
1724 return collect_changed_files
1707
1725
1708 def lookuprevlink_func(revlog):
1726 def lookuprevlink_func(revlog):
1709 def lookuprevlink(n):
1727 def lookuprevlink(n):
1710 return cl.node(revlog.linkrev(n))
1728 return cl.node(revlog.linkrev(n))
1711 return lookuprevlink
1729 return lookuprevlink
1712
1730
1713 def gengroup():
1731 def gengroup():
1714 # construct a list of all changed files
1732 # construct a list of all changed files
1715 changedfiles = {}
1733 changedfiles = {}
1716
1734
1717 for chnk in cl.group(nodes, identity,
1735 for chnk in cl.group(nodes, identity,
1718 changed_file_collector(changedfiles)):
1736 changed_file_collector(changedfiles)):
1719 yield chnk
1737 yield chnk
1720 changedfiles = changedfiles.keys()
1738 changedfiles = changedfiles.keys()
1721 changedfiles.sort()
1739 changedfiles.sort()
1722
1740
1723 mnfst = self.manifest
1741 mnfst = self.manifest
1724 nodeiter = gennodelst(mnfst)
1742 nodeiter = gennodelst(mnfst)
1725 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1743 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1726 yield chnk
1744 yield chnk
1727
1745
1728 for fname in changedfiles:
1746 for fname in changedfiles:
1729 filerevlog = self.file(fname)
1747 filerevlog = self.file(fname)
1730 nodeiter = gennodelst(filerevlog)
1748 nodeiter = gennodelst(filerevlog)
1731 nodeiter = list(nodeiter)
1749 nodeiter = list(nodeiter)
1732 if nodeiter:
1750 if nodeiter:
1733 yield changegroup.genchunk(fname)
1751 yield changegroup.genchunk(fname)
1734 lookup = lookuprevlink_func(filerevlog)
1752 lookup = lookuprevlink_func(filerevlog)
1735 for chnk in filerevlog.group(nodeiter, lookup):
1753 for chnk in filerevlog.group(nodeiter, lookup):
1736 yield chnk
1754 yield chnk
1737
1755
1738 yield changegroup.closechunk()
1756 yield changegroup.closechunk()
1739
1757
1740 if nodes:
1758 if nodes:
1741 self.hook('outgoing', node=hex(nodes[0]), source=source)
1759 self.hook('outgoing', node=hex(nodes[0]), source=source)
1742
1760
1743 return util.chunkbuffer(gengroup())
1761 return util.chunkbuffer(gengroup())
1744
1762
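# --- editor's aside: illustrative sketch, not part of the changeset ---
# On the wire, gengroup above produces:
#   <changelog group> <manifest group>
#   then, per changed file: <filename chunk> <filelog group>
#   then one final empty chunk closing the stream,
# where each group is itself a run of chunks ended by an empty chunk
# (this is exactly what addchangegroup below reads back). A minimal
# chunk reader, the inverse of the genchunk sketch earlier:

import struct
from StringIO import StringIO

def getchunk(fp):
    l = struct.unpack(">l", fp.read(4))[0]
    if l <= 4:
        return ""                     # empty chunk: end of a group
    return fp.read(l - 4)

fp = StringIO("\x00\x00\x00\x06ab\x00\x00\x00\x00")
assert getchunk(fp) == "ab" and getchunk(fp) == ""
# --- end aside ---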
1745 def addchangegroup(self, source, srctype, url):
1763 def addchangegroup(self, source, srctype, url):
1746 """add changegroup to repo.
1764 """add changegroup to repo.
1747
1765
1748 return values:
1766 return values:
1749 - nothing changed or no source: 0
1767 - nothing changed or no source: 0
1750 - more heads than before: 1+added heads (2..n)
1768 - more heads than before: 1+added heads (2..n)
1751 - fewer heads than before: -1-removed heads (-2..-n)
1769 - fewer heads than before: -1-removed heads (-2..-n)
1752 - number of heads stays the same: 1
1770 - number of heads stays the same: 1
1753 """
1771 """
1754 def csmap(x):
1772 def csmap(x):
1755 self.ui.debug(_("add changeset %s\n") % short(x))
1773 self.ui.debug(_("add changeset %s\n") % short(x))
1756 return cl.count()
1774 return cl.count()
1757
1775
1758 def revmap(x):
1776 def revmap(x):
1759 return cl.rev(x)
1777 return cl.rev(x)
1760
1778
1761 if not source:
1779 if not source:
1762 return 0
1780 return 0
1763
1781
1764 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1782 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1765
1783
1766 changesets = files = revisions = 0
1784 changesets = files = revisions = 0
1767
1785
1768 tr = self.transaction()
1786 tr = self.transaction()
1769
1787
1770 # write changelog data to temp files so concurrent readers will not see
1788 # write changelog data to temp files so concurrent readers will not see
1771 # an inconsistent view
1789 # an inconsistent view
1772 cl = None
1790 cl = None
1773 try:
1791 try:
1774 cl = appendfile.appendchangelog(self.sopener,
1792 cl = appendfile.appendchangelog(self.sopener,
1775 self.changelog.version)
1793 self.changelog.version)
1776
1794
1777 oldheads = len(cl.heads())
1795 oldheads = len(cl.heads())
1778
1796
1779 # pull off the changeset group
1797 # pull off the changeset group
1780 self.ui.status(_("adding changesets\n"))
1798 self.ui.status(_("adding changesets\n"))
1781 cor = cl.count() - 1
1799 cor = cl.count() - 1
1782 chunkiter = changegroup.chunkiter(source)
1800 chunkiter = changegroup.chunkiter(source)
1783 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1801 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1784 raise util.Abort(_("received changelog group is empty"))
1802 raise util.Abort(_("received changelog group is empty"))
1785 cnr = cl.count() - 1
1803 cnr = cl.count() - 1
1786 changesets = cnr - cor
1804 changesets = cnr - cor
1787
1805
1788 # pull off the manifest group
1806 # pull off the manifest group
1789 self.ui.status(_("adding manifests\n"))
1807 self.ui.status(_("adding manifests\n"))
1790 chunkiter = changegroup.chunkiter(source)
1808 chunkiter = changegroup.chunkiter(source)
1791 # no need to check for empty manifest group here:
1809 # no need to check for empty manifest group here:
1792 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1810 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1793 # no new manifest will be created and the manifest group will
1811 # no new manifest will be created and the manifest group will
1794 # be empty during the pull
1812 # be empty during the pull
1795 self.manifest.addgroup(chunkiter, revmap, tr)
1813 self.manifest.addgroup(chunkiter, revmap, tr)
1796
1814
1797 # process the files
1815 # process the files
1798 self.ui.status(_("adding file changes\n"))
1816 self.ui.status(_("adding file changes\n"))
1799 while 1:
1817 while 1:
1800 f = changegroup.getchunk(source)
1818 f = changegroup.getchunk(source)
1801 if not f:
1819 if not f:
1802 break
1820 break
1803 self.ui.debug(_("adding %s revisions\n") % f)
1821 self.ui.debug(_("adding %s revisions\n") % f)
1804 fl = self.file(f)
1822 fl = self.file(f)
1805 o = fl.count()
1823 o = fl.count()
1806 chunkiter = changegroup.chunkiter(source)
1824 chunkiter = changegroup.chunkiter(source)
1807 if fl.addgroup(chunkiter, revmap, tr) is None:
1825 if fl.addgroup(chunkiter, revmap, tr) is None:
1808 raise util.Abort(_("received file revlog group is empty"))
1826 raise util.Abort(_("received file revlog group is empty"))
1809 revisions += fl.count() - o
1827 revisions += fl.count() - o
1810 files += 1
1828 files += 1
1811
1829
1812 cl.writedata()
1830 cl.writedata()
1813 finally:
1831 finally:
1814 if cl:
1832 if cl:
1815 cl.cleanup()
1833 cl.cleanup()
1816
1834
1817 # make changelog see real files again
1835 # make changelog see real files again
1818 self.changelog = changelog.changelog(self.sopener,
1836 self.changelog = changelog.changelog(self.sopener,
1819 self.changelog.version)
1837 self.changelog.version)
1820 self.changelog.checkinlinesize(tr)
1838 self.changelog.checkinlinesize(tr)
1821
1839
1822 newheads = len(self.changelog.heads())
1840 newheads = len(self.changelog.heads())
1823 heads = ""
1841 heads = ""
1824 if oldheads and newheads != oldheads:
1842 if oldheads and newheads != oldheads:
1825 heads = _(" (%+d heads)") % (newheads - oldheads)
1843 heads = _(" (%+d heads)") % (newheads - oldheads)
1826
1844
1827 self.ui.status(_("added %d changesets"
1845 self.ui.status(_("added %d changesets"
1828 " with %d changes to %d files%s\n")
1846 " with %d changes to %d files%s\n")
1829 % (changesets, revisions, files, heads))
1847 % (changesets, revisions, files, heads))
1830
1848
1831 if changesets > 0:
1849 if changesets > 0:
1832 self.hook('pretxnchangegroup', throw=True,
1850 self.hook('pretxnchangegroup', throw=True,
1833 node=hex(self.changelog.node(cor+1)), source=srctype,
1851 node=hex(self.changelog.node(cor+1)), source=srctype,
1834 url=url)
1852 url=url)
1835
1853
1836 tr.close()
1854 tr.close()
1837
1855
1838 if changesets > 0:
1856 if changesets > 0:
1839 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1857 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1840 source=srctype, url=url)
1858 source=srctype, url=url)
1841
1859
1842 for i in xrange(cor + 1, cnr + 1):
1860 for i in xrange(cor + 1, cnr + 1):
1843 self.hook("incoming", node=hex(self.changelog.node(i)),
1861 self.hook("incoming", node=hex(self.changelog.node(i)),
1844 source=srctype, url=url)
1862 source=srctype, url=url)
1845
1863
1846 # never return 0 here:
1864 # never return 0 here:
1847 if newheads < oldheads:
1865 if newheads < oldheads:
1848 return newheads - oldheads - 1
1866 return newheads - oldheads - 1
1849 else:
1867 else:
1850 return newheads - oldheads + 1
1868 return newheads - oldheads + 1
1851
1869
1852
1870
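# --- editor's aside: illustrative sketch, not part of the changeset ---
# A hypothetical caller can decode addchangegroup's return value, per the
# docstring above, like so:

def describe(modheads):
    if modheads == 0:
        return "no changes"
    if modheads == 1:
        return "changed, head count unchanged"
    if modheads > 1:
        return "%d head(s) added" % (modheads - 1)
    return "%d head(s) removed" % (-modheads - 1)

assert describe(0) == "no changes"
assert describe(3) == "2 head(s) added"
assert describe(-2) == "1 head(s) removed"
# --- end aside ---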
1853 def stream_in(self, remote):
1871 def stream_in(self, remote):
1854 fp = remote.stream_out()
1872 fp = remote.stream_out()
1855 l = fp.readline()
1873 l = fp.readline()
1856 try:
1874 try:
1857 resp = int(l)
1875 resp = int(l)
1858 except ValueError:
1876 except ValueError:
1859 raise util.UnexpectedOutput(
1877 raise util.UnexpectedOutput(
1860 _('Unexpected response from remote server:'), l)
1878 _('Unexpected response from remote server:'), l)
1861 if resp == 1:
1879 if resp == 1:
1862 raise util.Abort(_('operation forbidden by server'))
1880 raise util.Abort(_('operation forbidden by server'))
1863 elif resp == 2:
1881 elif resp == 2:
1864 raise util.Abort(_('locking the remote repository failed'))
1882 raise util.Abort(_('locking the remote repository failed'))
1865 elif resp != 0:
1883 elif resp != 0:
1866 raise util.Abort(_('the server sent an unknown error code'))
1884 raise util.Abort(_('the server sent an unknown error code'))
1867 self.ui.status(_('streaming all changes\n'))
1885 self.ui.status(_('streaming all changes\n'))
1868 l = fp.readline()
1886 l = fp.readline()
1869 try:
1887 try:
1870 total_files, total_bytes = map(int, l.split(' ', 1))
1888 total_files, total_bytes = map(int, l.split(' ', 1))
1871 except (ValueError, TypeError):
1889 except (ValueError, TypeError):
1872 raise util.UnexpectedOutput(
1890 raise util.UnexpectedOutput(
1873 _('Unexpected response from remote server:'), l)
1891 _('Unexpected response from remote server:'), l)
1874 self.ui.status(_('%d files to transfer, %s of data\n') %
1892 self.ui.status(_('%d files to transfer, %s of data\n') %
1875 (total_files, util.bytecount(total_bytes)))
1893 (total_files, util.bytecount(total_bytes)))
1876 start = time.time()
1894 start = time.time()
1877 for i in xrange(total_files):
1895 for i in xrange(total_files):
1878 # XXX doesn't support '\n' or '\r' in filenames
1896 # XXX doesn't support '\n' or '\r' in filenames
1879 l = fp.readline()
1897 l = fp.readline()
1880 try:
1898 try:
1881 name, size = l.split('\0', 1)
1899 name, size = l.split('\0', 1)
1882 size = int(size)
1900 size = int(size)
1883 except (ValueError, TypeError):
1901 except (ValueError, TypeError):
1884 raise util.UnexpectedOutput(
1902 raise util.UnexpectedOutput(
1885 _('Unexpected response from remote server:'), l)
1903 _('Unexpected response from remote server:'), l)
1886 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1904 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1887 ofp = self.sopener(name, 'w')
1905 ofp = self.sopener(name, 'w')
1888 for chunk in util.filechunkiter(fp, limit=size):
1906 for chunk in util.filechunkiter(fp, limit=size):
1889 ofp.write(chunk)
1907 ofp.write(chunk)
1890 ofp.close()
1908 ofp.close()
1891 elapsed = time.time() - start
1909 elapsed = time.time() - start
1892 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1910 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1893 (util.bytecount(total_bytes), elapsed,
1911 (util.bytecount(total_bytes), elapsed,
1894 util.bytecount(total_bytes / elapsed)))
1912 util.bytecount(total_bytes / elapsed)))
1895 self.reload()
1913 self.reload()
1896 return len(self.heads()) + 1
1914 return len(self.heads()) + 1
1897
1915
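# --- editor's aside: illustrative sketch, not part of the changeset ---
# Read back from stream_in above, the server side must emit:
#   "<status>\n"     (0 = ok, 1 = forbidden, 2 = remote lock failed)
#   "<total_files> <total_bytes>\n"
#   then per file: "<name>\0<size>\n" followed by exactly <size> raw bytes.
# A hypothetical emitter for that framing:

from StringIO import StringIO

def emit_stream(out, entries):        # entries: list of (name, data) pairs
    out.write("0\n")
    total = sum(len(d) for n, d in entries)
    out.write("%d %d\n" % (len(entries), total))
    for name, data in entries:
        out.write("%s\0%d\n" % (name, len(data)))
        out.write(data)

buf = StringIO()
emit_stream(buf, [("00changelog.i", "DATA")])
# --- end aside ---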
1898 def clone(self, remote, heads=[], stream=False):
1916 def clone(self, remote, heads=[], stream=False):
1899 '''clone remote repository.
1917 '''clone remote repository.
1900
1918
1901 keyword arguments:
1919 keyword arguments:
1902 heads: list of revs to clone (forces use of pull)
1920 heads: list of revs to clone (forces use of pull)
1903 stream: use streaming clone if possible'''
1921 stream: use streaming clone if possible'''
1904
1922
1905 # now, all clients that can request uncompressed clones can
1923 # now, all clients that can request uncompressed clones can
1906 # read repo formats supported by all servers that can serve
1924 # read repo formats supported by all servers that can serve
1907 # them.
1925 # them.
1908
1926
1909 # if revlog format changes, client will have to check version
1927 # if revlog format changes, client will have to check version
1910 # and format flags on "stream" capability, and use
1928 # and format flags on "stream" capability, and use
1911 # uncompressed only if compatible.
1929 # uncompressed only if compatible.
1912
1930
1913 if stream and not heads and remote.capable('stream'):
1931 if stream and not heads and remote.capable('stream'):
1914 return self.stream_in(remote)
1932 return self.stream_in(remote)
1915 return self.pull(remote, heads)
1933 return self.pull(remote, heads)
1916
1934
1917 # used to avoid circular references so destructors work
1935 # used to avoid circular references so destructors work
1918 def aftertrans(files):
1936 def aftertrans(files):
1919 renamefiles = [tuple(t) for t in files]
1937 renamefiles = [tuple(t) for t in files]
1920 def a():
1938 def a():
1921 for src, dest in renamefiles:
1939 for src, dest in renamefiles:
1922 util.rename(src, dest)
1940 util.rename(src, dest)
1923 return a
1941 return a
1924
1942
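# --- editor's aside: illustrative sketch, not part of the changeset ---
# aftertrans is module-level so the callback it returns closes over only
# the plain list of rename pairs; a bound method would have kept the
# repository alive via 'self', and the resulting cycle would delay the
# destructor-driven cleanup the comment above refers to. Shape of the
# safe pattern (hypothetical class):

class Tx:
    def __init__(self, after):
        self.after = after            # callback holds no reference back to Tx
    def __del__(self):
        self.after()

t = Tx(aftertrans([]))                # empty rename list: callback is a no-op
del t                                 # no cycle, so the destructor runs promptly
# --- end aside ---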
1925 def instance(ui, path, create):
1943 def instance(ui, path, create):
1926 return localrepository(ui, util.drop_scheme('file', path), create)
1944 return localrepository(ui, util.drop_scheme('file', path), create)
1927
1945
1928 def islocal(path):
1946 def islocal(path):
1929 return True
1947 return True
@@ -1,66 +1,79
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms
7 # This software may be used and distributed according to the terms
8 # of the GNU General Public License, incorporated herein by reference.
8 # of the GNU General Public License, incorporated herein by reference.
9
9
10 from demandload import *
10 from demandload import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 demandload(globals(), "changelog filelog httprangereader")
12 demandload(globals(), "changelog filelog httprangereader")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14
14
15 class rangereader(httprangereader.httprangereader):
15 class rangereader(httprangereader.httprangereader):
16 def read(self, size=None):
16 def read(self, size=None):
17 try:
17 try:
18 return httprangereader.httprangereader.read(self, size)
18 return httprangereader.httprangereader.read(self, size)
19 except urllib2.HTTPError, inst:
19 except urllib2.HTTPError, inst:
20 raise IOError(None, inst)
20 raise IOError(None, inst)
21 except urllib2.URLError, inst:
21 except urllib2.URLError, inst:
22 raise IOError(None, inst.reason[1])
22 raise IOError(None, inst.reason[1])
23
23
24 def opener(base):
24 def opener(base):
25 """return a function that opens files over http"""
25 """return a function that opens files over http"""
26 p = base
26 p = base
27 def o(path, mode="r"):
27 def o(path, mode="r"):
28 f = "/".join((p, urllib.quote(path)))
28 f = "/".join((p, urllib.quote(path)))
29 return rangereader(f)
29 return rangereader(f)
30 return o
30 return o
31
31
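# --- editor's aside: illustrative sketch, not part of the changeset ---
# opener() above is a small factory: it fixes the base URL and returns a
# function that resolves repository-relative paths beneath it, so the rest
# of the code never sees the transport. The path arithmetic alone
# (hypothetical names, no network I/O, URL quoting omitted):

def join_under(base):
    def o(path, mode="r"):
        return "/".join((base, path))
    return o

assert join_under("http://host/repo/.hg")("requires") == "http://host/repo/.hg/requires"
# --- end aside ---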
32 class statichttprepository(localrepo.localrepository):
32 class statichttprepository(localrepo.localrepository):
33 def __init__(self, ui, path):
33 def __init__(self, ui, path):
34 self._url = path
34 self._url = path
35 self.path = (path + "/.hg")
35 self.path = (path + "/.hg")
36 self.spath = self.path
36 self.spath = self.path
37 self.ui = ui
37 self.ui = ui
38 self.revlogversion = 0
38 self.revlogversion = 0
39 self.opener = opener(self.path)
39 self.opener = opener(self.path)
40 # find requirements
41 try:
42 requirements = self.opener("requires").read().splitlines()
43 except IOError:
44 requirements = []
45 # check them
46 for r in requirements:
47 if r not in self.supported:
48 raise repo.RepoError(_("requirement '%s' not supported") % r)
49
50 # setup store
51 self.spath = self.path
40 self.sopener = opener(self.spath)
52 self.sopener = opener(self.spath)
53
41 self.manifest = manifest.manifest(self.sopener)
54 self.manifest = manifest.manifest(self.sopener)
42 self.changelog = changelog.changelog(self.sopener)
55 self.changelog = changelog.changelog(self.sopener)
43 self.tagscache = None
56 self.tagscache = None
44 self.nodetagscache = None
57 self.nodetagscache = None
45 self.encodepats = None
58 self.encodepats = None
46 self.decodepats = None
59 self.decodepats = None
47
60
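# --- editor's aside: illustrative sketch, not part of the changeset ---
# The requirements check added above is the point of this changeset:
# .hg/requires lists one feature name per line, and a reader must refuse
# the repository when any line names a feature it does not support.
# Standalone shape of the check (feature names here are hypothetical):

def check_requirements(lines, supported):
    for r in lines:
        if r and r not in supported:
            raise ValueError("requirement '%s' not supported!" % r)

check_requirements(["revlogv1"], supported=("revlogv1",))   # passes
try:
    check_requirements(["jacuzzi"], supported=("revlogv1",))
except ValueError, inst:
    print inst                        # requirement 'jacuzzi' not supported!
# --- end aside ---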
48 def url(self):
61 def url(self):
49 return 'static-' + self._url
62 return 'static-' + self._url
50
63
51 def dev(self):
64 def dev(self):
52 return -1
65 return -1
53
66
54 def local(self):
67 def local(self):
55 return False
68 return False
56
69
57 def instance(ui, path, create):
70 def instance(ui, path, create):
58 if create:
71 if create:
59 raise util.Abort(_('cannot create new static-http repository'))
72 raise util.Abort(_('cannot create new static-http repository'))
60 if path.startswith('old-http:'):
73 if path.startswith('old-http:'):
61 ui.warn(_("old-http:// syntax is deprecated, "
74 ui.warn(_("old-http:// syntax is deprecated, "
62 "please use static-http:// instead\n"))
75 "please use static-http:// instead\n"))
63 path = path[4:]
76 path = path[4:]
64 else:
77 else:
65 path = path[7:]
78 path = path[7:]
66 return statichttprepository(ui, path)
79 return statichttprepository(ui, path)
@@ -1,8 +1,9
1 0
1 0
2 0
2 0
3 adding changesets
3 adding changesets
4 killed!
4 killed!
5 transaction abort!
5 transaction abort!
6 rollback completed
6 rollback completed
7 00changelog.i
7 00changelog.i
8 journal.dirstate
8 journal.dirstate
9 requires