don't create the .hg/data at init time
Benoit Boissinot
r3713:8ae88ed2 default
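In short: repository initialization no longer creates an empty .hg/data directory, and the hardlink/copy branch of clone() in hg.py therefore no longer needs to remove that directory before util.copyfiles runs. A condensed before/after sketch of the two touched spots (details are in the hunks below):

    # localrepo.py, before: __init__ created the data dir eagerly
    os.mkdir(self.path)
    os.mkdir(self.join("data"))      # dropped by this change

    # hg.py, before: clone() had to clear the empty dir so copyfiles could work
    os.rmdir(os.path.join(dest_path, ".hg", "data"))

    # after: .hg/data appears lazily on first write; clone() simply copies
    # whichever files exist and ignores ENOENT for the ones that don't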
--- a/mercurial/hg.py
+++ b/mercurial/hg.py
@@ -1,257 +1,256 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 from node import *
 from repo import *
 from demandload import *
 from i18n import gettext as _
 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")

 def _local(path):
     return (os.path.isfile(util.drop_scheme('file', path)) and
             bundlerepo or localrepo)

 schemes = {
     'bundle': bundlerepo,
     'file': _local,
     'hg': httprepo,
     'http': httprepo,
     'https': httprepo,
     'old-http': statichttprepo,
     'ssh': sshrepo,
     'static-http': statichttprepo,
     }

 def _lookup(path):
     scheme = 'file'
     if path:
         c = path.find(':')
         if c > 0:
             scheme = path[:c]
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         return thing

 def islocal(repo):
     '''return true if repo or path is local'''
     if isinstance(repo, str):
         try:
             return _lookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()

 repo_setup_hooks = []

 def repository(ui, path='', create=False):
     """return a repository object for the specified path"""
     repo = _lookup(path).instance(ui, path, create)
     for hook in repo_setup_hooks:
         hook(ui, repo)
     return repo

 def defaultdest(source):
     '''return default destination of clone if none is given'''
     return os.path.basename(os.path.normpath(source))

 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
           stream=False):
     """Make a copy of an existing repository.

     Create a copy of an existing repository in a new directory.  The
     source and destination are URLs, as passed to the repository
     function.  Returns a pair of repository objects, the source and
     newly created destination.

     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.

     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.

     Arguments:

     source: repository object or URL

     dest: URL of destination repository to create (defaults to base
     name of source repository)

     pull: always pull from source repository, even in local case

     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)

     rev: revision to clone up to (implies pull=True)

     update: update working directory after clone completes, if
     destination is local repository
     """
     if isinstance(source, str):
         src_repo = repository(ui, source)
     else:
         src_repo = source
         source = src_repo.url()

     if dest is None:
         dest = defaultdest(source)

     def localpath(path):
         if path.startswith('file://'):
             return path[7:]
         if path.startswith('file:'):
             return path[5:]
         return path

     dest = localpath(dest)
     source = localpath(source)

     if os.path.exists(dest):
         raise util.Abort(_("destination '%s' already exists") % dest)

     class DirCleanup(object):
         def __init__(self, dir_):
             self.rmtree = shutil.rmtree
             self.dir_ = dir_
         def close(self):
             self.dir_ = None
         def __del__(self):
             if self.dir_:
                 self.rmtree(self.dir_, True)

     dest_repo = repository(ui, dest, create=True)

     dest_path = None
     dir_cleanup = None
     if dest_repo.local():
         dest_path = os.path.realpath(dest_repo.root)
         dir_cleanup = DirCleanup(dest_path)

     abspath = source
     copy = False
     if src_repo.local() and dest_repo.local():
         abspath = os.path.abspath(source)
         copy = not pull and not rev

     src_lock, dest_lock = None, None
     if copy:
         try:
             # we use a lock here because if we race with commit, we
             # can end up with extra data in the cloned revlogs that's
             # not pointed to by changesets, thus causing verify to
             # fail
             src_lock = src_repo.lock()
         except lock.LockException:
             copy = False

     if copy:
         # we lock here to avoid premature writing to the target
         dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))

-        # we need to remove the (empty) data dir in dest so copyfiles
-        # can do its work
-        os.rmdir(os.path.join(dest_path, ".hg", "data"))
-        files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
-        for f in files.split():
+        files = ("data",
+                 "00manifest.d", "00manifest.i",
+                 "00changelog.d", "00changelog.i")
+        for f in files:
             src = os.path.join(source, ".hg", f)
             dst = os.path.join(dest_path, ".hg", f)
             try:
                 util.copyfiles(src, dst)
             except OSError, inst:
                 if inst.errno != errno.ENOENT:
                     raise

         # we need to re-init the repo after manually copying the data
         # into it
         dest_repo = repository(ui, dest)

     else:
         revs = None
         if rev:
             if 'lookup' not in src_repo.capabilities:
                 raise util.Abort(_("src repository does not support revision "
                                    "lookup and so doesn't support clone by "
                                    "revision"))
             revs = [src_repo.lookup(r) for r in rev]

         if dest_repo.local():
             dest_repo.clone(src_repo, heads=revs, stream=stream)
         elif src_repo.local():
             src_repo.push(dest_repo, revs=revs)
         else:
             raise util.Abort(_("clone from remote to remote not supported"))

     if src_lock:
         src_lock.release()

     if dest_repo.local():
         fp = dest_repo.opener("hgrc", "w", text=True)
         fp.write("[paths]\n")
         fp.write("default = %s\n" % abspath)
         fp.close()

     if dest_lock:
         dest_lock.release()

     if update:
         _update(dest_repo, dest_repo.changelog.tip())
     if dir_cleanup:
         dir_cleanup.close()

     return src_repo, dest_repo

 def _showstats(repo, stats):
     stats = ((stats[0], _("updated")),
              (stats[1], _("merged")),
              (stats[2], _("removed")),
              (stats[3], _("unresolved")))
     note = ", ".join([_("%d files %s") % s for s in stats])
     repo.ui.status("%s\n" % note)

 def _update(repo, node): return update(repo, node)

 def update(repo, node):
     """update the working directory to node, merging linear changes"""
     stats = _merge.update(repo, node, False, False, None, None)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("There are unresolved merges with"
                          " locally modified files.\n"))
     return stats[3]

 def clean(repo, node, wlock=None, show_stats=True):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = _merge.update(repo, node, False, True, None, wlock)
     if show_stats: _showstats(repo, stats)
     return stats[3]

 def merge(repo, node, force=None, remind=True, wlock=None):
     """branch merge with node, resolving changes"""
     stats = _merge.update(repo, node, True, force, False, wlock)
     _showstats(repo, stats)
     if stats[3]:
         pl = repo.parents()
         repo.ui.status(_("There are unresolved merges,"
                          " you can redo the full merge using:\n"
                          "  hg update -C %s\n"
                          "  hg merge %s\n")
                        % (pl[0].rev(), pl[1].rev()))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3]

 def revert(repo, node, choose, wlock):
     """revert changes to revision in node without updating dirstate"""
     return _merge.update(repo, node, False, True, choose, wlock)[3]

 def verify(repo):
     """verify the consistency of a repository"""
     return _verify.verify(repo)
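For context, the clone() docstring above describes the public entry point of this module. A minimal caller sketch (hypothetical URL and destination name, assuming a ui instance from mercurial.ui as used by the other callers of this module):

    from mercurial import ui, hg

    u = ui.ui()
    # clone returns (source_repo, destination_repo); dest defaults to the
    # base name of the source, and passing rev=[...] implies a pull-based
    # clone rather than the hardlink/copy fast path changed above
    src_repo, dest_repo = hg.clone(u, "http://example.com/repo", dest="repo-copy")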
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1896 +1,1895 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
41 else:
40 else:
42 raise repo.RepoError(_("repository %s not found") % path)
41 raise repo.RepoError(_("repository %s not found") % path)
43 elif create:
42 elif create:
44 raise repo.RepoError(_("repository %s already exists") % path)
43 raise repo.RepoError(_("repository %s already exists") % path)
45
44
46 self.root = os.path.realpath(path)
45 self.root = os.path.realpath(path)
47 self.origroot = path
46 self.origroot = path
48 self.ui = ui.ui(parentui=parentui)
47 self.ui = ui.ui(parentui=parentui)
49 self.opener = util.opener(self.path)
48 self.opener = util.opener(self.path)
50 self.sopener = util.opener(self.path)
49 self.sopener = util.opener(self.path)
51 self.wopener = util.opener(self.root)
50 self.wopener = util.opener(self.root)
52
51
53 try:
52 try:
54 self.ui.readconfig(self.join("hgrc"), self.root)
53 self.ui.readconfig(self.join("hgrc"), self.root)
55 except IOError:
54 except IOError:
56 pass
55 pass
57
56
58 v = self.ui.configrevlog()
57 v = self.ui.configrevlog()
59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
61 fl = v.get('flags', None)
60 fl = v.get('flags', None)
62 flags = 0
61 flags = 0
63 if fl != None:
62 if fl != None:
64 for x in fl.split():
63 for x in fl.split():
65 flags |= revlog.flagstr(x)
64 flags |= revlog.flagstr(x)
66 elif self.revlogv1:
65 elif self.revlogv1:
67 flags = revlog.REVLOG_DEFAULT_FLAGS
66 flags = revlog.REVLOG_DEFAULT_FLAGS
68
67
69 v = self.revlogversion | flags
68 v = self.revlogversion | flags
70 self.manifest = manifest.manifest(self.sopener, v)
69 self.manifest = manifest.manifest(self.sopener, v)
71 self.changelog = changelog.changelog(self.sopener, v)
70 self.changelog = changelog.changelog(self.sopener, v)
72
71
73 # the changelog might not have the inline index flag
72 # the changelog might not have the inline index flag
74 # on. If the format of the changelog is the same as found in
73 # on. If the format of the changelog is the same as found in
75 # .hgrc, apply any flags found in the .hgrc as well.
74 # .hgrc, apply any flags found in the .hgrc as well.
76 # Otherwise, just version from the changelog
75 # Otherwise, just version from the changelog
77 v = self.changelog.version
76 v = self.changelog.version
78 if v == self.revlogversion:
77 if v == self.revlogversion:
79 v |= flags
78 v |= flags
80 self.revlogversion = v
79 self.revlogversion = v
81
80
82 self.tagscache = None
81 self.tagscache = None
83 self.branchcache = None
82 self.branchcache = None
84 self.nodetagscache = None
83 self.nodetagscache = None
85 self.encodepats = None
84 self.encodepats = None
86 self.decodepats = None
85 self.decodepats = None
87 self.transhandle = None
86 self.transhandle = None
88
87
89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
90
89
91 def url(self):
90 def url(self):
92 return 'file:' + self.root
91 return 'file:' + self.root
93
92
94 def hook(self, name, throw=False, **args):
93 def hook(self, name, throw=False, **args):
95 def callhook(hname, funcname):
94 def callhook(hname, funcname):
96 '''call python hook. hook is callable object, looked up as
95 '''call python hook. hook is callable object, looked up as
97 name in python module. if callable returns "true", hook
96 name in python module. if callable returns "true", hook
98 fails, else passes. if hook raises exception, treated as
97 fails, else passes. if hook raises exception, treated as
99 hook failure. exception propagates if throw is "true".
98 hook failure. exception propagates if throw is "true".
100
99
101 reason for "true" meaning "hook failed" is so that
100 reason for "true" meaning "hook failed" is so that
102 unmodified commands (e.g. mercurial.commands.update) can
101 unmodified commands (e.g. mercurial.commands.update) can
103 be run as hooks without wrappers to convert return values.'''
102 be run as hooks without wrappers to convert return values.'''
104
103
105 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
106 d = funcname.rfind('.')
105 d = funcname.rfind('.')
107 if d == -1:
106 if d == -1:
108 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
109 % (hname, funcname))
108 % (hname, funcname))
110 modname = funcname[:d]
109 modname = funcname[:d]
111 try:
110 try:
112 obj = __import__(modname)
111 obj = __import__(modname)
113 except ImportError:
112 except ImportError:
114 try:
113 try:
115 # extensions are loaded with hgext_ prefix
114 # extensions are loaded with hgext_ prefix
116 obj = __import__("hgext_%s" % modname)
115 obj = __import__("hgext_%s" % modname)
117 except ImportError:
116 except ImportError:
118 raise util.Abort(_('%s hook is invalid '
117 raise util.Abort(_('%s hook is invalid '
119 '(import of "%s" failed)') %
118 '(import of "%s" failed)') %
120 (hname, modname))
119 (hname, modname))
121 try:
120 try:
122 for p in funcname.split('.')[1:]:
121 for p in funcname.split('.')[1:]:
123 obj = getattr(obj, p)
122 obj = getattr(obj, p)
124 except AttributeError, err:
123 except AttributeError, err:
125 raise util.Abort(_('%s hook is invalid '
124 raise util.Abort(_('%s hook is invalid '
126 '("%s" is not defined)') %
125 '("%s" is not defined)') %
127 (hname, funcname))
126 (hname, funcname))
128 if not callable(obj):
127 if not callable(obj):
129 raise util.Abort(_('%s hook is invalid '
128 raise util.Abort(_('%s hook is invalid '
130 '("%s" is not callable)') %
129 '("%s" is not callable)') %
131 (hname, funcname))
130 (hname, funcname))
132 try:
131 try:
133 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
134 except (KeyboardInterrupt, util.SignalInterrupt):
133 except (KeyboardInterrupt, util.SignalInterrupt):
135 raise
134 raise
136 except Exception, exc:
135 except Exception, exc:
137 if isinstance(exc, util.Abort):
136 if isinstance(exc, util.Abort):
138 self.ui.warn(_('error: %s hook failed: %s\n') %
137 self.ui.warn(_('error: %s hook failed: %s\n') %
139 (hname, exc.args[0]))
138 (hname, exc.args[0]))
140 else:
139 else:
141 self.ui.warn(_('error: %s hook raised an exception: '
140 self.ui.warn(_('error: %s hook raised an exception: '
142 '%s\n') % (hname, exc))
141 '%s\n') % (hname, exc))
143 if throw:
142 if throw:
144 raise
143 raise
145 self.ui.print_exc()
144 self.ui.print_exc()
146 return True
145 return True
147 if r:
146 if r:
148 if throw:
147 if throw:
149 raise util.Abort(_('%s hook failed') % hname)
148 raise util.Abort(_('%s hook failed') % hname)
150 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
151 return r
150 return r
152
151
153 def runhook(name, cmd):
152 def runhook(name, cmd):
154 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
155 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
156 r = util.system(cmd, environ=env, cwd=self.root)
155 r = util.system(cmd, environ=env, cwd=self.root)
157 if r:
156 if r:
158 desc, r = util.explain_exit(r)
157 desc, r = util.explain_exit(r)
159 if throw:
158 if throw:
160 raise util.Abort(_('%s hook %s') % (name, desc))
159 raise util.Abort(_('%s hook %s') % (name, desc))
161 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
162 return r
161 return r
163
162
164 r = False
163 r = False
165 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
166 if hname.split(".", 1)[0] == name and cmd]
165 if hname.split(".", 1)[0] == name and cmd]
167 hooks.sort()
166 hooks.sort()
168 for hname, cmd in hooks:
167 for hname, cmd in hooks:
169 if cmd.startswith('python:'):
168 if cmd.startswith('python:'):
170 r = callhook(hname, cmd[7:].strip()) or r
169 r = callhook(hname, cmd[7:].strip()) or r
171 else:
170 else:
172 r = runhook(hname, cmd) or r
171 r = runhook(hname, cmd) or r
173 return r
172 return r
174
173
175 tag_disallowed = ':\r\n'
174 tag_disallowed = ':\r\n'
176
175
177 def tag(self, name, node, message, local, user, date):
176 def tag(self, name, node, message, local, user, date):
178 '''tag a revision with a symbolic name.
177 '''tag a revision with a symbolic name.
179
178
180 if local is True, the tag is stored in a per-repository file.
179 if local is True, the tag is stored in a per-repository file.
181 otherwise, it is stored in the .hgtags file, and a new
180 otherwise, it is stored in the .hgtags file, and a new
182 changeset is committed with the change.
181 changeset is committed with the change.
183
182
184 keyword arguments:
183 keyword arguments:
185
184
186 local: whether to store tag in non-version-controlled file
185 local: whether to store tag in non-version-controlled file
187 (default False)
186 (default False)
188
187
189 message: commit message to use if committing
188 message: commit message to use if committing
190
189
191 user: name of user to use if committing
190 user: name of user to use if committing
192
191
193 date: date tuple to use if committing'''
192 date: date tuple to use if committing'''
194
193
195 for c in self.tag_disallowed:
194 for c in self.tag_disallowed:
196 if c in name:
195 if c in name:
197 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
198
197
199 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
200
199
201 if local:
200 if local:
202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 self.hook('tag', node=hex(node), tag=name, local=local)
202 self.hook('tag', node=hex(node), tag=name, local=local)
204 return
203 return
205
204
206 for x in self.status()[:5]:
205 for x in self.status()[:5]:
207 if '.hgtags' in x:
206 if '.hgtags' in x:
208 raise util.Abort(_('working copy of .hgtags is changed '
207 raise util.Abort(_('working copy of .hgtags is changed '
209 '(please commit .hgtags manually)'))
208 '(please commit .hgtags manually)'))
210
209
211 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
212 if self.dirstate.state('.hgtags') == '?':
211 if self.dirstate.state('.hgtags') == '?':
213 self.add(['.hgtags'])
212 self.add(['.hgtags'])
214
213
215 self.commit(['.hgtags'], message, user, date)
214 self.commit(['.hgtags'], message, user, date)
216 self.hook('tag', node=hex(node), tag=name, local=local)
215 self.hook('tag', node=hex(node), tag=name, local=local)
217
216
218 def tags(self):
217 def tags(self):
219 '''return a mapping of tag to node'''
218 '''return a mapping of tag to node'''
220 if not self.tagscache:
219 if not self.tagscache:
221 self.tagscache = {}
220 self.tagscache = {}
222
221
223 def parsetag(line, context):
222 def parsetag(line, context):
224 if not line:
223 if not line:
225 return
224 return
226 s = l.split(" ", 1)
225 s = l.split(" ", 1)
227 if len(s) != 2:
226 if len(s) != 2:
228 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
229 return
228 return
230 node, key = s
229 node, key = s
231 key = key.strip()
230 key = key.strip()
232 try:
231 try:
233 bin_n = bin(node)
232 bin_n = bin(node)
234 except TypeError:
233 except TypeError:
235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
236 (context, node))
235 (context, node))
237 return
236 return
238 if bin_n not in self.changelog.nodemap:
237 if bin_n not in self.changelog.nodemap:
239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
240 (context, key))
239 (context, key))
241 return
240 return
242 self.tagscache[key] = bin_n
241 self.tagscache[key] = bin_n
243
242
244 # read the tags file from each head, ending with the tip,
243 # read the tags file from each head, ending with the tip,
245 # and add each tag found to the map, with "newer" ones
244 # and add each tag found to the map, with "newer" ones
246 # taking precedence
245 # taking precedence
247 f = None
246 f = None
248 for rev, node, fnode in self._hgtagsnodes():
247 for rev, node, fnode in self._hgtagsnodes():
249 f = (f and f.filectx(fnode) or
248 f = (f and f.filectx(fnode) or
250 self.filectx('.hgtags', fileid=fnode))
249 self.filectx('.hgtags', fileid=fnode))
251 count = 0
250 count = 0
252 for l in f.data().splitlines():
251 for l in f.data().splitlines():
253 count += 1
252 count += 1
254 parsetag(l, _("%s, line %d") % (str(f), count))
253 parsetag(l, _("%s, line %d") % (str(f), count))
255
254
256 try:
255 try:
257 f = self.opener("localtags")
256 f = self.opener("localtags")
258 count = 0
257 count = 0
259 for l in f:
258 for l in f:
260 count += 1
259 count += 1
261 parsetag(l, _("localtags, line %d") % count)
260 parsetag(l, _("localtags, line %d") % count)
262 except IOError:
261 except IOError:
263 pass
262 pass
264
263
265 self.tagscache['tip'] = self.changelog.tip()
264 self.tagscache['tip'] = self.changelog.tip()
266
265
267 return self.tagscache
266 return self.tagscache
268
267
269 def _hgtagsnodes(self):
268 def _hgtagsnodes(self):
270 heads = self.heads()
269 heads = self.heads()
271 heads.reverse()
270 heads.reverse()
272 last = {}
271 last = {}
273 ret = []
272 ret = []
274 for node in heads:
273 for node in heads:
275 c = self.changectx(node)
274 c = self.changectx(node)
276 rev = c.rev()
275 rev = c.rev()
277 try:
276 try:
278 fnode = c.filenode('.hgtags')
277 fnode = c.filenode('.hgtags')
279 except repo.LookupError:
278 except repo.LookupError:
280 continue
279 continue
281 ret.append((rev, node, fnode))
280 ret.append((rev, node, fnode))
282 if fnode in last:
281 if fnode in last:
283 ret[last[fnode]] = None
282 ret[last[fnode]] = None
284 last[fnode] = len(ret) - 1
283 last[fnode] = len(ret) - 1
285 return [item for item in ret if item]
284 return [item for item in ret if item]
286
285
287 def tagslist(self):
286 def tagslist(self):
288 '''return a list of tags ordered by revision'''
287 '''return a list of tags ordered by revision'''
289 l = []
288 l = []
290 for t, n in self.tags().items():
289 for t, n in self.tags().items():
291 try:
290 try:
292 r = self.changelog.rev(n)
291 r = self.changelog.rev(n)
293 except:
292 except:
294 r = -2 # sort to the beginning of the list if unknown
293 r = -2 # sort to the beginning of the list if unknown
295 l.append((r, t, n))
294 l.append((r, t, n))
296 l.sort()
295 l.sort()
297 return [(t, n) for r, t, n in l]
296 return [(t, n) for r, t, n in l]
298
297
299 def nodetags(self, node):
298 def nodetags(self, node):
300 '''return the tags associated with a node'''
299 '''return the tags associated with a node'''
301 if not self.nodetagscache:
300 if not self.nodetagscache:
302 self.nodetagscache = {}
301 self.nodetagscache = {}
303 for t, n in self.tags().items():
302 for t, n in self.tags().items():
304 self.nodetagscache.setdefault(n, []).append(t)
303 self.nodetagscache.setdefault(n, []).append(t)
305 return self.nodetagscache.get(node, [])
304 return self.nodetagscache.get(node, [])
306
305
307 def branchtags(self):
306 def branchtags(self):
308 if self.branchcache != None:
307 if self.branchcache != None:
309 return self.branchcache
308 return self.branchcache
310
309
311 self.branchcache = {} # avoid recursion in changectx
310 self.branchcache = {} # avoid recursion in changectx
312
311
313 partial, last, lrev = self._readbranchcache()
312 partial, last, lrev = self._readbranchcache()
314
313
315 tiprev = self.changelog.count() - 1
314 tiprev = self.changelog.count() - 1
316 if lrev != tiprev:
315 if lrev != tiprev:
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319
318
320 self.branchcache = partial
319 self.branchcache = partial
321 return self.branchcache
320 return self.branchcache
322
321
323 def _readbranchcache(self):
322 def _readbranchcache(self):
324 partial = {}
323 partial = {}
325 try:
324 try:
326 f = self.opener("branches.cache")
325 f = self.opener("branches.cache")
327 lines = f.read().split('\n')
326 lines = f.read().split('\n')
328 f.close()
327 f.close()
329 last, lrev = lines.pop(0).rstrip().split(" ", 1)
328 last, lrev = lines.pop(0).rstrip().split(" ", 1)
330 last, lrev = bin(last), int(lrev)
329 last, lrev = bin(last), int(lrev)
331 if (lrev < self.changelog.count() and
330 if (lrev < self.changelog.count() and
332 self.changelog.node(lrev) == last): # sanity check
331 self.changelog.node(lrev) == last): # sanity check
333 for l in lines:
332 for l in lines:
334 if not l: continue
333 if not l: continue
335 node, label = l.rstrip().split(" ", 1)
334 node, label = l.rstrip().split(" ", 1)
336 partial[label] = bin(node)
335 partial[label] = bin(node)
337 else: # invalidate the cache
336 else: # invalidate the cache
338 last, lrev = nullid, nullrev
337 last, lrev = nullid, nullrev
339 except IOError:
338 except IOError:
340 last, lrev = nullid, nullrev
339 last, lrev = nullid, nullrev
341 return partial, last, lrev
340 return partial, last, lrev
342
341
343 def _writebranchcache(self, branches, tip, tiprev):
342 def _writebranchcache(self, branches, tip, tiprev):
344 try:
343 try:
345 f = self.opener("branches.cache", "w")
344 f = self.opener("branches.cache", "w")
346 f.write("%s %s\n" % (hex(tip), tiprev))
345 f.write("%s %s\n" % (hex(tip), tiprev))
347 for label, node in branches.iteritems():
346 for label, node in branches.iteritems():
348 f.write("%s %s\n" % (hex(node), label))
347 f.write("%s %s\n" % (hex(node), label))
349 except IOError:
348 except IOError:
350 pass
349 pass
351
350
352 def _updatebranchcache(self, partial, start, end):
351 def _updatebranchcache(self, partial, start, end):
353 for r in xrange(start, end):
352 for r in xrange(start, end):
354 c = self.changectx(r)
353 c = self.changectx(r)
355 b = c.branch()
354 b = c.branch()
356 if b:
355 if b:
357 partial[b] = c.node()
356 partial[b] = c.node()
358
357
359 def lookup(self, key):
358 def lookup(self, key):
360 if key == '.':
359 if key == '.':
361 key = self.dirstate.parents()[0]
360 key = self.dirstate.parents()[0]
362 if key == nullid:
361 if key == nullid:
363 raise repo.RepoError(_("no revision checked out"))
362 raise repo.RepoError(_("no revision checked out"))
364 n = self.changelog._match(key)
363 n = self.changelog._match(key)
365 if n:
364 if n:
366 return n
365 return n
367 if key in self.tags():
366 if key in self.tags():
368 return self.tags()[key]
367 return self.tags()[key]
369 if key in self.branchtags():
368 if key in self.branchtags():
370 return self.branchtags()[key]
369 return self.branchtags()[key]
371 n = self.changelog._partialmatch(key)
370 n = self.changelog._partialmatch(key)
372 if n:
371 if n:
373 return n
372 return n
374 raise repo.RepoError(_("unknown revision '%s'") % key)
373 raise repo.RepoError(_("unknown revision '%s'") % key)
375
374
376 def dev(self):
375 def dev(self):
377 return os.lstat(self.path).st_dev
376 return os.lstat(self.path).st_dev
378
377
379 def local(self):
378 def local(self):
380 return True
379 return True
381
380
382 def join(self, f):
381 def join(self, f):
383 return os.path.join(self.path, f)
382 return os.path.join(self.path, f)
384
383
385 def sjoin(self, f):
384 def sjoin(self, f):
386 return os.path.join(self.path, f)
385 return os.path.join(self.path, f)
387
386
388 def wjoin(self, f):
387 def wjoin(self, f):
389 return os.path.join(self.root, f)
388 return os.path.join(self.root, f)
390
389
391 def file(self, f):
390 def file(self, f):
392 if f[0] == '/':
391 if f[0] == '/':
393 f = f[1:]
392 f = f[1:]
394 return filelog.filelog(self.sopener, f, self.revlogversion)
393 return filelog.filelog(self.sopener, f, self.revlogversion)
395
394
396 def changectx(self, changeid=None):
395 def changectx(self, changeid=None):
397 return context.changectx(self, changeid)
396 return context.changectx(self, changeid)
398
397
399 def workingctx(self):
398 def workingctx(self):
400 return context.workingctx(self)
399 return context.workingctx(self)
401
400
402 def parents(self, changeid=None):
401 def parents(self, changeid=None):
403 '''
402 '''
404 get list of changectxs for parents of changeid or working directory
403 get list of changectxs for parents of changeid or working directory
405 '''
404 '''
406 if changeid is None:
405 if changeid is None:
407 pl = self.dirstate.parents()
406 pl = self.dirstate.parents()
408 else:
407 else:
409 n = self.changelog.lookup(changeid)
408 n = self.changelog.lookup(changeid)
410 pl = self.changelog.parents(n)
409 pl = self.changelog.parents(n)
411 if pl[1] == nullid:
410 if pl[1] == nullid:
412 return [self.changectx(pl[0])]
411 return [self.changectx(pl[0])]
413 return [self.changectx(pl[0]), self.changectx(pl[1])]
412 return [self.changectx(pl[0]), self.changectx(pl[1])]
414
413
415 def filectx(self, path, changeid=None, fileid=None):
414 def filectx(self, path, changeid=None, fileid=None):
416 """changeid can be a changeset revision, node, or tag.
415 """changeid can be a changeset revision, node, or tag.
417 fileid can be a file revision or node."""
416 fileid can be a file revision or node."""
418 return context.filectx(self, path, changeid, fileid)
417 return context.filectx(self, path, changeid, fileid)
419
418
420 def getcwd(self):
419 def getcwd(self):
421 return self.dirstate.getcwd()
420 return self.dirstate.getcwd()
422
421
423 def wfile(self, f, mode='r'):
422 def wfile(self, f, mode='r'):
424 return self.wopener(f, mode)
423 return self.wopener(f, mode)
425
424
426 def wread(self, filename):
425 def wread(self, filename):
427 if self.encodepats == None:
426 if self.encodepats == None:
428 l = []
427 l = []
429 for pat, cmd in self.ui.configitems("encode"):
428 for pat, cmd in self.ui.configitems("encode"):
430 mf = util.matcher(self.root, "", [pat], [], [])[1]
429 mf = util.matcher(self.root, "", [pat], [], [])[1]
431 l.append((mf, cmd))
430 l.append((mf, cmd))
432 self.encodepats = l
431 self.encodepats = l
433
432
434 data = self.wopener(filename, 'r').read()
433 data = self.wopener(filename, 'r').read()
435
434
436 for mf, cmd in self.encodepats:
435 for mf, cmd in self.encodepats:
437 if mf(filename):
436 if mf(filename):
438 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
437 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
439 data = util.filter(data, cmd)
438 data = util.filter(data, cmd)
440 break
439 break
441
440
442 return data
441 return data
443
442
444 def wwrite(self, filename, data, fd=None):
443 def wwrite(self, filename, data, fd=None):
445 if self.decodepats == None:
444 if self.decodepats == None:
446 l = []
445 l = []
447 for pat, cmd in self.ui.configitems("decode"):
446 for pat, cmd in self.ui.configitems("decode"):
448 mf = util.matcher(self.root, "", [pat], [], [])[1]
447 mf = util.matcher(self.root, "", [pat], [], [])[1]
449 l.append((mf, cmd))
448 l.append((mf, cmd))
450 self.decodepats = l
449 self.decodepats = l
451
450
452 for mf, cmd in self.decodepats:
451 for mf, cmd in self.decodepats:
453 if mf(filename):
452 if mf(filename):
454 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
453 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
455 data = util.filter(data, cmd)
454 data = util.filter(data, cmd)
456 break
455 break
457
456
458 if fd:
457 if fd:
459 return fd.write(data)
458 return fd.write(data)
460 return self.wopener(filename, 'w').write(data)
459 return self.wopener(filename, 'w').write(data)
461
460
462 def transaction(self):
461 def transaction(self):
463 tr = self.transhandle
462 tr = self.transhandle
464 if tr != None and tr.running():
463 if tr != None and tr.running():
465 return tr.nest()
464 return tr.nest()
466
465
467 # save dirstate for rollback
466 # save dirstate for rollback
468 try:
467 try:
469 ds = self.opener("dirstate").read()
468 ds = self.opener("dirstate").read()
470 except IOError:
469 except IOError:
471 ds = ""
470 ds = ""
472 self.opener("journal.dirstate", "w").write(ds)
471 self.opener("journal.dirstate", "w").write(ds)
473
472
474 tr = transaction.transaction(self.ui.warn, self.sopener,
473 tr = transaction.transaction(self.ui.warn, self.sopener,
475 self.sjoin("journal"),
474 self.sjoin("journal"),
476 aftertrans(self.path))
475 aftertrans(self.path))
477 self.transhandle = tr
476 self.transhandle = tr
478 return tr
477 return tr
479
478
480 def recover(self):
479 def recover(self):
481 l = self.lock()
480 l = self.lock()
482 if os.path.exists(self.sjoin("journal")):
481 if os.path.exists(self.sjoin("journal")):
483 self.ui.status(_("rolling back interrupted transaction\n"))
482 self.ui.status(_("rolling back interrupted transaction\n"))
484 transaction.rollback(self.sopener, self.sjoin("journal"))
483 transaction.rollback(self.sopener, self.sjoin("journal"))
485 self.reload()
484 self.reload()
486 return True
485 return True
487 else:
486 else:
488 self.ui.warn(_("no interrupted transaction available\n"))
487 self.ui.warn(_("no interrupted transaction available\n"))
489 return False
488 return False
490
489
491 def rollback(self, wlock=None):
490 def rollback(self, wlock=None):
492 if not wlock:
491 if not wlock:
493 wlock = self.wlock()
492 wlock = self.wlock()
494 l = self.lock()
493 l = self.lock()
495 if os.path.exists(self.sjoin("undo")):
494 if os.path.exists(self.sjoin("undo")):
496 self.ui.status(_("rolling back last transaction\n"))
495 self.ui.status(_("rolling back last transaction\n"))
497 transaction.rollback(self.sopener, self.sjoin("undo"))
496 transaction.rollback(self.sopener, self.sjoin("undo"))
498 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
497 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
499 self.reload()
498 self.reload()
500 self.wreload()
499 self.wreload()
501 else:
500 else:
502 self.ui.warn(_("no rollback information available\n"))
501 self.ui.warn(_("no rollback information available\n"))
503
502
504 def wreload(self):
503 def wreload(self):
505 self.dirstate.read()
504 self.dirstate.read()
506
505
507 def reload(self):
506 def reload(self):
508 self.changelog.load()
507 self.changelog.load()
509 self.manifest.load()
508 self.manifest.load()
510 self.tagscache = None
509 self.tagscache = None
511 self.nodetagscache = None
510 self.nodetagscache = None
512
511
513 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
512 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
514 desc=None):
513 desc=None):
515 try:
514 try:
516 l = lock.lock(lockname, 0, releasefn, desc=desc)
515 l = lock.lock(lockname, 0, releasefn, desc=desc)
517 except lock.LockHeld, inst:
516 except lock.LockHeld, inst:
518 if not wait:
517 if not wait:
519 raise
518 raise
520 self.ui.warn(_("waiting for lock on %s held by %r\n") %
519 self.ui.warn(_("waiting for lock on %s held by %r\n") %
521 (desc, inst.locker))
520 (desc, inst.locker))
522 # default to 600 seconds timeout
521 # default to 600 seconds timeout
523 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
522 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
524 releasefn, desc=desc)
523 releasefn, desc=desc)
525 if acquirefn:
524 if acquirefn:
526 acquirefn()
525 acquirefn()
527 return l
526 return l
528
527
529 def lock(self, wait=1):
528 def lock(self, wait=1):
530 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
529 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
531 desc=_('repository %s') % self.origroot)
530 desc=_('repository %s') % self.origroot)
532
531
533 def wlock(self, wait=1):
532 def wlock(self, wait=1):
534 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
533 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
535 self.wreload,
534 self.wreload,
536 desc=_('working directory of %s') % self.origroot)
535 desc=_('working directory of %s') % self.origroot)
537
536
538 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
537 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
539 """
538 """
540 commit an individual file as part of a larger transaction
539 commit an individual file as part of a larger transaction
541 """
540 """
542
541
543 t = self.wread(fn)
542 t = self.wread(fn)
544 fl = self.file(fn)
543 fl = self.file(fn)
545 fp1 = manifest1.get(fn, nullid)
544 fp1 = manifest1.get(fn, nullid)
546 fp2 = manifest2.get(fn, nullid)
545 fp2 = manifest2.get(fn, nullid)
547
546
548 meta = {}
547 meta = {}
549 cp = self.dirstate.copied(fn)
548 cp = self.dirstate.copied(fn)
550 if cp:
549 if cp:
551 meta["copy"] = cp
550 meta["copy"] = cp
552 if not manifest2: # not a branch merge
551 if not manifest2: # not a branch merge
553 meta["copyrev"] = hex(manifest1.get(cp, nullid))
552 meta["copyrev"] = hex(manifest1.get(cp, nullid))
554 fp2 = nullid
553 fp2 = nullid
555 elif fp2 != nullid: # copied on remote side
554 elif fp2 != nullid: # copied on remote side
556 meta["copyrev"] = hex(manifest1.get(cp, nullid))
555 meta["copyrev"] = hex(manifest1.get(cp, nullid))
557 else: # copied on local side, reversed
556 else: # copied on local side, reversed
558 meta["copyrev"] = hex(manifest2.get(cp))
557 meta["copyrev"] = hex(manifest2.get(cp))
559 fp2 = nullid
558 fp2 = nullid
560 self.ui.debug(_(" %s: copy %s:%s\n") %
559 self.ui.debug(_(" %s: copy %s:%s\n") %
561 (fn, cp, meta["copyrev"]))
560 (fn, cp, meta["copyrev"]))
562 fp1 = nullid
561 fp1 = nullid
563 elif fp2 != nullid:
562 elif fp2 != nullid:
564 # is one parent an ancestor of the other?
563 # is one parent an ancestor of the other?
565 fpa = fl.ancestor(fp1, fp2)
564 fpa = fl.ancestor(fp1, fp2)
566 if fpa == fp1:
565 if fpa == fp1:
567 fp1, fp2 = fp2, nullid
566 fp1, fp2 = fp2, nullid
568 elif fpa == fp2:
567 elif fpa == fp2:
569 fp2 = nullid
568 fp2 = nullid
570
569
571 # is the file unmodified from the parent? report existing entry
570 # is the file unmodified from the parent? report existing entry
572 if fp2 == nullid and not fl.cmp(fp1, t):
571 if fp2 == nullid and not fl.cmp(fp1, t):
573 return fp1
572 return fp1
574
573
575 changelist.append(fn)
574 changelist.append(fn)
576 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
575 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
577
576
578 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
577 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
579 if p1 is None:
578 if p1 is None:
580 p1, p2 = self.dirstate.parents()
579 p1, p2 = self.dirstate.parents()
581 return self.commit(files=files, text=text, user=user, date=date,
580 return self.commit(files=files, text=text, user=user, date=date,
582 p1=p1, p2=p2, wlock=wlock)
581 p1=p1, p2=p2, wlock=wlock)
583
582
584 def commit(self, files=None, text="", user=None, date=None,
583 def commit(self, files=None, text="", user=None, date=None,
585 match=util.always, force=False, lock=None, wlock=None,
584 match=util.always, force=False, lock=None, wlock=None,
586 force_editor=False, p1=None, p2=None, extra={}):
585 force_editor=False, p1=None, p2=None, extra={}):
587
586
588 commit = []
587 commit = []
589 remove = []
588 remove = []
590 changed = []
589 changed = []
591 use_dirstate = (p1 is None) # not rawcommit
590 use_dirstate = (p1 is None) # not rawcommit
592 extra = extra.copy()
591 extra = extra.copy()
593
592
594 if use_dirstate:
593 if use_dirstate:
595 if files:
594 if files:
596 for f in files:
595 for f in files:
597 s = self.dirstate.state(f)
596 s = self.dirstate.state(f)
598 if s in 'nmai':
597 if s in 'nmai':
599 commit.append(f)
598 commit.append(f)
600 elif s == 'r':
599 elif s == 'r':
601 remove.append(f)
600 remove.append(f)
602 else:
601 else:
603 self.ui.warn(_("%s not tracked!\n") % f)
602 self.ui.warn(_("%s not tracked!\n") % f)
604 else:
603 else:
605 changes = self.status(match=match)[:5]
604 changes = self.status(match=match)[:5]
606 modified, added, removed, deleted, unknown = changes
605 modified, added, removed, deleted, unknown = changes
607 commit = modified + added
606 commit = modified + added
608 remove = removed
607 remove = removed
609 else:
608 else:
610 commit = files
609 commit = files
611
610
612 if use_dirstate:
611 if use_dirstate:
613 p1, p2 = self.dirstate.parents()
612 p1, p2 = self.dirstate.parents()
614 update_dirstate = True
613 update_dirstate = True
615 else:
614 else:
616 p1, p2 = p1, p2 or nullid
615 p1, p2 = p1, p2 or nullid
617 update_dirstate = (self.dirstate.parents()[0] == p1)
616 update_dirstate = (self.dirstate.parents()[0] == p1)
618
617
619 c1 = self.changelog.read(p1)
618 c1 = self.changelog.read(p1)
620 c2 = self.changelog.read(p2)
619 c2 = self.changelog.read(p2)
621 m1 = self.manifest.read(c1[0]).copy()
620 m1 = self.manifest.read(c1[0]).copy()
622 m2 = self.manifest.read(c2[0])
621 m2 = self.manifest.read(c2[0])
623
622
624 if use_dirstate:
623 if use_dirstate:
625 branchname = self.workingctx().branch()
624 branchname = self.workingctx().branch()
626 else:
625 else:
627 branchname = ""
626 branchname = ""
628
627
629 if use_dirstate:
628 if use_dirstate:
630 oldname = c1[5].get("branch", "")
629 oldname = c1[5].get("branch", "")
631 if not commit and not remove and not force and p2 == nullid and \
630 if not commit and not remove and not force and p2 == nullid and \
632 branchname == oldname:
631 branchname == oldname:
633 self.ui.status(_("nothing changed\n"))
632 self.ui.status(_("nothing changed\n"))
634 return None
633 return None
635
634
636 xp1 = hex(p1)
635 xp1 = hex(p1)
637 if p2 == nullid: xp2 = ''
636 if p2 == nullid: xp2 = ''
638 else: xp2 = hex(p2)
637 else: xp2 = hex(p2)
639
638
640 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
639 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
641
640
642 if not wlock:
641 if not wlock:
643 wlock = self.wlock()
642 wlock = self.wlock()
644 if not lock:
643 if not lock:
645 lock = self.lock()
644 lock = self.lock()
646 tr = self.transaction()
645 tr = self.transaction()
647
646
648 # check in files
647 # check in files
649 new = {}
648 new = {}
650 linkrev = self.changelog.count()
649 linkrev = self.changelog.count()
651 commit.sort()
650 commit.sort()
652 for f in commit:
651 for f in commit:
653 self.ui.note(f + "\n")
652 self.ui.note(f + "\n")
654 try:
653 try:
655 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
654 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
656 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
655 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
657 except IOError:
656 except IOError:
658 if use_dirstate:
657 if use_dirstate:
659 self.ui.warn(_("trouble committing %s!\n") % f)
658 self.ui.warn(_("trouble committing %s!\n") % f)
660 raise
659 raise
661 else:
660 else:
662 remove.append(f)
661 remove.append(f)
663
662
664 # update manifest
663 # update manifest
665 m1.update(new)
664 m1.update(new)
666 remove.sort()
665 remove.sort()
667
666
668 for f in remove:
667 for f in remove:
669 if f in m1:
668 if f in m1:
670 del m1[f]
669 del m1[f]
671 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
670 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
672
671
673 # add changeset
672 # add changeset
674 new = new.keys()
673 new = new.keys()
675 new.sort()
674 new.sort()
676
675
677 user = user or self.ui.username()
676 user = user or self.ui.username()
678 if not text or force_editor:
677 if not text or force_editor:
679 edittext = []
678 edittext = []
680 if text:
679 if text:
681 edittext.append(text)
680 edittext.append(text)
682 edittext.append("")
681 edittext.append("")
683 if p2 != nullid:
682 if p2 != nullid:
684 edittext.append("HG: branch merge")
683 edittext.append("HG: branch merge")
685 edittext.extend(["HG: changed %s" % f for f in changed])
684 edittext.extend(["HG: changed %s" % f for f in changed])
686 edittext.extend(["HG: removed %s" % f for f in remove])
685 edittext.extend(["HG: removed %s" % f for f in remove])
687 if not changed and not remove:
686 if not changed and not remove:
688 edittext.append("HG: no files changed")
687 edittext.append("HG: no files changed")
689 edittext.append("")
688 edittext.append("")
690 # run editor in the repository root
689 # run editor in the repository root
691 olddir = os.getcwd()
690 olddir = os.getcwd()
692 os.chdir(self.root)
691 os.chdir(self.root)
693 text = self.ui.edit("\n".join(edittext), user)
692 text = self.ui.edit("\n".join(edittext), user)
694 os.chdir(olddir)
693 os.chdir(olddir)
695
694
696 lines = [line.rstrip() for line in text.rstrip().splitlines()]
695 lines = [line.rstrip() for line in text.rstrip().splitlines()]
697 while lines and not lines[0]:
696 while lines and not lines[0]:
698 del lines[0]
697 del lines[0]
699 if not lines:
698 if not lines:
700 return None
699 return None
701 text = '\n'.join(lines)
700 text = '\n'.join(lines)
702 if branchname:
701 if branchname:
703 extra["branch"] = branchname
702 extra["branch"] = branchname
704 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
703 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
705 user, date, extra)
704 user, date, extra)
706 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
705 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
707 parent2=xp2)
706 parent2=xp2)
708 tr.close()
707 tr.close()
709
708
710 if use_dirstate or update_dirstate:
709 if use_dirstate or update_dirstate:
711 self.dirstate.setparents(n)
710 self.dirstate.setparents(n)
712 if use_dirstate:
711 if use_dirstate:
713 self.dirstate.update(new, "n")
712 self.dirstate.update(new, "n")
714 self.dirstate.forget(remove)
713 self.dirstate.forget(remove)
715
714
716 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
715 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
717 return n
716 return n
718
717
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

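    # Example (hypothetical caller, not part of the original source):
    # consumers typically dispatch on the src codes documented in walk()'s
    # docstring, e.g.
    #
    #     for src, fn in repo.walk():
    #         if src == 'b':    # badmatch hit; the file does not exist
    #             continue
    #         process(fn)
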
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

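    # Example (hypothetical caller): the seven lists are returned in a fixed
    # order and are usually unpacked in one go, e.g.
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(list_clean=True)
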
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

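    # For illustration: since the sort key above is the negated revision
    # number, a repository with heads at revs 5 and 9 returns the rev 9
    # node first, i.e. newest head first.
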
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # Callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a specific
    # branch).
    #
    # Passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

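    # For illustration: the mapping returned above has the shape
    # {headnode: [tagname, ...]}, with one entry per head that has at
    # least one branch tag not shadowed by another tag visible from it.
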
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

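    # For illustration: between() samples the first-parent chain at
    # exponentially growing distances, so for each (top, bottom) pair the
    # returned list holds the nodes 1, 2, 4, 8, ... steps below top.
    # findincoming() below relies on this spacing for its binary search
    # over remote branches.
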
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

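    # Example (hypothetical caller): pull() below only needs the returned
    # roots to decide whether anything is incoming at all, e.g.
    #
    #     fetch = repo.findincoming(remote)
    #     if not fetch:
    #         pass  # nothing to pull
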
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1382 def changegroupsubset(self, bases, heads, source):
1381 def changegroupsubset(self, bases, heads, source):
1383 """This function generates a changegroup consisting of all the nodes
1382 """This function generates a changegroup consisting of all the nodes
1384 that are descendents of any of the bases, and ancestors of any of
1383 that are descendents of any of the bases, and ancestors of any of
1385 the heads.
1384 the heads.
1386
1385
1387 It is fairly complex as determining which filenodes and which
1386 It is fairly complex as determining which filenodes and which
1388 manifest nodes need to be included for the changeset to be complete
1387 manifest nodes need to be included for the changeset to be complete
1389 is non-trivial.
1388 is non-trivial.
1390
1389
1391 Another wrinkle is doing the reverse, figuring out which changeset in
1390 Another wrinkle is doing the reverse, figuring out which changeset in
1392 the changegroup a particular filenode or manifestnode belongs to."""
1391 the changegroup a particular filenode or manifestnode belongs to."""
1393
1392
1394 self.hook('preoutgoing', throw=True, source=source)
1393 self.hook('preoutgoing', throw=True, source=source)
1395
1394
1396 # Set up some initial variables
1395 # Set up some initial variables
1397 # Make it easy to refer to self.changelog
1396 # Make it easy to refer to self.changelog
1398 cl = self.changelog
1397 cl = self.changelog
1399 # msng is short for missing - compute the list of changesets in this
1398 # msng is short for missing - compute the list of changesets in this
1400 # changegroup.
1399 # changegroup.
1401 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1400 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1402 self.changegroupinfo(msng_cl_lst)
1401 self.changegroupinfo(msng_cl_lst)
1403 # Some bases may turn out to be superfluous, and some heads may be
1402 # Some bases may turn out to be superfluous, and some heads may be
1404 # too. nodesbetween will return the minimal set of bases and heads
1403 # too. nodesbetween will return the minimal set of bases and heads
1405 # necessary to re-create the changegroup.
1404 # necessary to re-create the changegroup.
1406
1405
1407 # Known heads are the list of heads that it is assumed the recipient
1406 # Known heads are the list of heads that it is assumed the recipient
1408 # of this changegroup will know about.
1407 # of this changegroup will know about.
1409 knownheads = {}
1408 knownheads = {}
1410 # We assume that all parents of bases are known heads.
1409 # We assume that all parents of bases are known heads.
1411 for n in bases:
1410 for n in bases:
1412 for p in cl.parents(n):
1411 for p in cl.parents(n):
1413 if p != nullid:
1412 if p != nullid:
1414 knownheads[p] = 1
1413 knownheads[p] = 1
1415 knownheads = knownheads.keys()
1414 knownheads = knownheads.keys()
1416 if knownheads:
1415 if knownheads:
1417 # Now that we know what heads are known, we can compute which
1416 # Now that we know what heads are known, we can compute which
1418 # changesets are known. The recipient must know about all
1417 # changesets are known. The recipient must know about all
1419 # changesets required to reach the known heads from the null
1418 # changesets required to reach the known heads from the null
1420 # changeset.
1419 # changeset.
1421 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1420 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1422 junk = None
1421 junk = None
1423 # Transform the list into an ersatz set.
1422 # Transform the list into an ersatz set.
1424 has_cl_set = dict.fromkeys(has_cl_set)
1423 has_cl_set = dict.fromkeys(has_cl_set)
1425 else:
1424 else:
1426 # If there were no known heads, the recipient cannot be assumed to
1425 # If there were no known heads, the recipient cannot be assumed to
1427 # know about any changesets.
1426 # know about any changesets.
1428 has_cl_set = {}
1427 has_cl_set = {}
1429
1428
1430 # Make it easy to refer to self.manifest
1429 # Make it easy to refer to self.manifest
1431 mnfst = self.manifest
1430 mnfst = self.manifest
1432 # We don't know which manifests are missing yet
1431 # We don't know which manifests are missing yet
1433 msng_mnfst_set = {}
1432 msng_mnfst_set = {}
1434 # Nor do we know which filenodes are missing.
1433 # Nor do we know which filenodes are missing.
1435 msng_filenode_set = {}
1434 msng_filenode_set = {}
1436
1435
1437 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1436 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1438 junk = None
1437 junk = None
1439
1438
1440 # A changeset always belongs to itself, so the changenode lookup
1439 # A changeset always belongs to itself, so the changenode lookup
1441 # function for a changenode is identity.
1440 # function for a changenode is identity.
1442 def identity(x):
1441 def identity(x):
1443 return x
1442 return x
1444
1443
1445 # A function generating function. Sets up an environment for the
1444 # A function generating function. Sets up an environment for the
1446 # inner function.
1445 # inner function.
1447 def cmp_by_rev_func(revlog):
1446 def cmp_by_rev_func(revlog):
1448 # Compare two nodes by their revision number in the environment's
1447 # Compare two nodes by their revision number in the environment's
1449 # revision history. Since the revision number both represents the
1448 # revision history. Since the revision number both represents the
1450 # most efficient order to read the nodes in, and represents a
1449 # most efficient order to read the nodes in, and represents a
1451 # topological sorting of the nodes, this function is often useful.
1450 # topological sorting of the nodes, this function is often useful.
1452 def cmp_by_rev(a, b):
1451 def cmp_by_rev(a, b):
1453 return cmp(revlog.rev(a), revlog.rev(b))
1452 return cmp(revlog.rev(a), revlog.rev(b))
1454 return cmp_by_rev
1453 return cmp_by_rev
1455
1454
1456 # If we determine that a particular file or manifest node must be a
1455 # If we determine that a particular file or manifest node must be a
1457 # node that the recipient of the changegroup will already have, we can
1456 # node that the recipient of the changegroup will already have, we can
1458 # also assume the recipient will have all the parents. This function
1457 # also assume the recipient will have all the parents. This function
1459 # prunes them from the set of missing nodes.
1458 # prunes them from the set of missing nodes.
1460 def prune_parents(revlog, hasset, msngset):
1459 def prune_parents(revlog, hasset, msngset):
1461 haslst = hasset.keys()
1460 haslst = hasset.keys()
1462 haslst.sort(cmp_by_rev_func(revlog))
1461 haslst.sort(cmp_by_rev_func(revlog))
1463 for node in haslst:
1462 for node in haslst:
1464 parentlst = [p for p in revlog.parents(node) if p != nullid]
1463 parentlst = [p for p in revlog.parents(node) if p != nullid]
1465 while parentlst:
1464 while parentlst:
1466 n = parentlst.pop()
1465 n = parentlst.pop()
1467 if n not in hasset:
1466 if n not in hasset:
1468 hasset[n] = 1
1467 hasset[n] = 1
1469 p = [p for p in revlog.parents(n) if p != nullid]
1468 p = [p for p in revlog.parents(n) if p != nullid]
1470 parentlst.extend(p)
1469 parentlst.extend(p)
1471 for n in hasset:
1470 for n in hasset:
1472 msngset.pop(n, None)
1471 msngset.pop(n, None)
1473
1472
1474 # This is a function generating function used to set up an environment
1473 # This is a function generating function used to set up an environment
1475 # for the inner function to execute in.
1474 # for the inner function to execute in.
1476 def manifest_and_file_collector(changedfileset):
1475 def manifest_and_file_collector(changedfileset):
1477 # This is an information gathering function that gathers
1476 # This is an information gathering function that gathers
1478 # information from each changeset node that goes out as part of
1477 # information from each changeset node that goes out as part of
1479 # the changegroup. The information gathered is a list of which
1478 # the changegroup. The information gathered is a list of which
1480 # manifest nodes are potentially required (the recipient may
1479 # manifest nodes are potentially required (the recipient may
1481 # already have them) and total list of all files which were
1480 # already have them) and total list of all files which were
1482 # changed in any changeset in the changegroup.
1481 # changed in any changeset in the changegroup.
1483 #
1482 #
1484 # We also remember the first changenode we saw any manifest
1483 # We also remember the first changenode we saw any manifest
1485 # referenced by so we can later determine which changenode 'owns'
1484 # referenced by so we can later determine which changenode 'owns'
1486 # the manifest.
1485 # the manifest.
1487 def collect_manifests_and_files(clnode):
1486 def collect_manifests_and_files(clnode):
1488 c = cl.read(clnode)
1487 c = cl.read(clnode)
1489 for f in c[3]:
1488 for f in c[3]:
1490 # This is to make sure we only have one instance of each
1489 # This is to make sure we only have one instance of each
1491 # filename string for each filename.
1490 # filename string for each filename.
1492 changedfileset.setdefault(f, f)
1491 changedfileset.setdefault(f, f)
1493 msng_mnfst_set.setdefault(c[0], clnode)
1492 msng_mnfst_set.setdefault(c[0], clnode)
1494 return collect_manifests_and_files
1493 return collect_manifests_and_files
1495
1494
1496 # Figure out which manifest nodes (of the ones we think might be part
1495 # Figure out which manifest nodes (of the ones we think might be part
1497 # of the changegroup) the recipient must know about and remove them
1496 # of the changegroup) the recipient must know about and remove them
1498 # from the changegroup.
1497 # from the changegroup.
1499 def prune_manifests():
1498 def prune_manifests():
1500 has_mnfst_set = {}
1499 has_mnfst_set = {}
1501 for n in msng_mnfst_set:
1500 for n in msng_mnfst_set:
1502 # If a 'missing' manifest thinks it belongs to a changenode
1501 # If a 'missing' manifest thinks it belongs to a changenode
1503 # the recipient is assumed to have, obviously the recipient
1502 # the recipient is assumed to have, obviously the recipient
1504 # must have that manifest.
1503 # must have that manifest.
1505 linknode = cl.node(mnfst.linkrev(n))
1504 linknode = cl.node(mnfst.linkrev(n))
1506 if linknode in has_cl_set:
1505 if linknode in has_cl_set:
1507 has_mnfst_set[n] = 1
1506 has_mnfst_set[n] = 1
1508 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1507 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1509
1508
1510 # Use the information collected in collect_manifests_and_files to say
1509 # Use the information collected in collect_manifests_and_files to say
1511 # which changenode any manifestnode belongs to.
1510 # which changenode any manifestnode belongs to.
1512 def lookup_manifest_link(mnfstnode):
1511 def lookup_manifest_link(mnfstnode):
1513 return msng_mnfst_set[mnfstnode]
1512 return msng_mnfst_set[mnfstnode]
1514
1513
1515 # A function generating function that sets up the initial environment
1514 # A function generating function that sets up the initial environment
1516 # the inner function.
1515 # the inner function.
1517 def filenode_collector(changedfiles):
1516 def filenode_collector(changedfiles):
1518 next_rev = [0]
1517 next_rev = [0]
1519 # This gathers information from each manifestnode included in the
1518 # This gathers information from each manifestnode included in the
1520 # changegroup about which filenodes the manifest node references
1519 # changegroup about which filenodes the manifest node references
1521 # so we can include those in the changegroup too.
1520 # so we can include those in the changegroup too.
1522 #
1521 #
1523 # It also remembers which changenode each filenode belongs to. It
1522 # It also remembers which changenode each filenode belongs to. It
1524 # does this by assuming the a filenode belongs to the changenode
1523 # does this by assuming the a filenode belongs to the changenode
1525 # the first manifest that references it belongs to.
1524 # the first manifest that references it belongs to.
1526 def collect_msng_filenodes(mnfstnode):
1525 def collect_msng_filenodes(mnfstnode):
1527 r = mnfst.rev(mnfstnode)
1526 r = mnfst.rev(mnfstnode)
1528 if r == next_rev[0]:
1527 if r == next_rev[0]:
1529 # If the last rev we looked at was the one just previous,
1528 # If the last rev we looked at was the one just previous,
1530 # we only need to see a diff.
1529 # we only need to see a diff.
1531 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1530 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1532 # For each line in the delta
1531 # For each line in the delta
1533 for dline in delta.splitlines():
1532 for dline in delta.splitlines():
1534 # get the filename and filenode for that line
1533 # get the filename and filenode for that line
1535 f, fnode = dline.split('\0')
1534 f, fnode = dline.split('\0')
1536 fnode = bin(fnode[:40])
1535 fnode = bin(fnode[:40])
1537 f = changedfiles.get(f, None)
1536 f = changedfiles.get(f, None)
1538 # And if the file is in the list of files we care
1537 # And if the file is in the list of files we care
1539 # about.
1538 # about.
1540 if f is not None:
1539 if f is not None:
1541 # Get the changenode this manifest belongs to
1540 # Get the changenode this manifest belongs to
1542 clnode = msng_mnfst_set[mnfstnode]
1541 clnode = msng_mnfst_set[mnfstnode]
1543 # Create the set of filenodes for the file if
1542 # Create the set of filenodes for the file if
1544 # there isn't one already.
1543 # there isn't one already.
1545 ndset = msng_filenode_set.setdefault(f, {})
1544 ndset = msng_filenode_set.setdefault(f, {})
1546 # And set the filenode's changelog node to the
1545 # And set the filenode's changelog node to the
1547 # manifest's if it hasn't been set already.
1546 # manifest's if it hasn't been set already.
1548 ndset.setdefault(fnode, clnode)
1547 ndset.setdefault(fnode, clnode)
1549 else:
1548 else:
1550 # Otherwise we need a full manifest.
1549 # Otherwise we need a full manifest.
1551 m = mnfst.read(mnfstnode)
1550 m = mnfst.read(mnfstnode)
1552 # For every file in we care about.
1551 # For every file in we care about.
1553 for f in changedfiles:
1552 for f in changedfiles:
1554 fnode = m.get(f, None)
1553 fnode = m.get(f, None)
1555 # If it's in the manifest
1554 # If it's in the manifest
1556 if fnode is not None:
1555 if fnode is not None:
1557 # See comments above.
1556 # See comments above.
1558 clnode = msng_mnfst_set[mnfstnode]
1557 clnode = msng_mnfst_set[mnfstnode]
1559 ndset = msng_filenode_set.setdefault(f, {})
1558 ndset = msng_filenode_set.setdefault(f, {})
1560 ndset.setdefault(fnode, clnode)
1559 ndset.setdefault(fnode, clnode)
1561 # Remember the revision we hope to see next.
1560 # Remember the revision we hope to see next.
1562 next_rev[0] = r + 1
1561 next_rev[0] = r + 1
1563 return collect_msng_filenodes
1562 return collect_msng_filenodes
1564
1563
1565 # We have a list of filenodes we think we need for a file, lets remove
1564 # We have a list of filenodes we think we need for a file, lets remove
1566 # all those we now the recipient must have.
1565 # all those we now the recipient must have.
1567 def prune_filenodes(f, filerevlog):
1566 def prune_filenodes(f, filerevlog):
1568 msngset = msng_filenode_set[f]
1567 msngset = msng_filenode_set[f]
1569 hasset = {}
1568 hasset = {}
1570 # If a 'missing' filenode thinks it belongs to a changenode we
1569 # If a 'missing' filenode thinks it belongs to a changenode we
1571 # assume the recipient must have, then the recipient must have
1570 # assume the recipient must have, then the recipient must have
1572 # that filenode.
1571 # that filenode.
1573 for n in msngset:
1572 for n in msngset:
1574 clnode = cl.node(filerevlog.linkrev(n))
1573 clnode = cl.node(filerevlog.linkrev(n))
1575 if clnode in has_cl_set:
1574 if clnode in has_cl_set:
1576 hasset[n] = 1
1575 hasset[n] = 1
1577 prune_parents(filerevlog, hasset, msngset)
1576 prune_parents(filerevlog, hasset, msngset)
1578
1577
1579 # A function generator function that sets up the a context for the
1578 # A function generator function that sets up the a context for the
1580 # inner function.
1579 # inner function.
1581 def lookup_filenode_link_func(fname):
1580 def lookup_filenode_link_func(fname):
1582 msngset = msng_filenode_set[fname]
1581 msngset = msng_filenode_set[fname]
1583 # Lookup the changenode the filenode belongs to.
1582 # Lookup the changenode the filenode belongs to.
1584 def lookup_filenode_link(fnode):
1583 def lookup_filenode_link(fnode):
1585 return msngset[fnode]
1584 return msngset[fnode]
1586 return lookup_filenode_link
1585 return lookup_filenode_link
1587
1586
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function, as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

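    # Note on the stream layout produced by gengroup() above: a changelog
    # group, then a manifest group, then one (filename chunk, filenode
    # group) pair per changed file, terminated by the empty chunk from
    # changegroup.closechunk().
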
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

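    # A minimal usage sketch for the changegroup producers above and the
    # consumer below (hypothetical repositories `src` and `dst`; the
    # 'clone' source tag and the URL are illustrative only):
    #
    #   cg = src.changegroup([nullid], 'clone')     # everything since nullid
    #   dst.addchangegroup(cg, 'clone', url='file:///hypothetical')
    #
    # changegroup() returns a util.chunkbuffer, and addchangegroup() only
    # needs a read() method on its source, so the two compose directly.
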
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1

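    # For reference, the line-oriented protocol stream_in() consumes above,
    # as read by the code (not a documented spec):
    #   <resp>\n                        0 = ok, 1 = forbidden, 2 = lock failed
    #   <total_files> <total_bytes>\n
    # then, for each file:
    #   <name>\0<size>\n  followed by exactly <size> bytes of raw store data
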
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(base):
    p = base
    def a():
        util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
        util.rename(os.path.join(p, "journal.dirstate"),
                    os.path.join(p, "undo.dirstate"))
    return a

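# Renaming the journal files (rather than deleting them) is what leaves the
# undo/undo.dirstate pair behind; 'hg rollback' later restores from these.
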
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
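
The hunks below update the test suite to match: first the expected output of a pull that is killed mid-transaction, then the ssh test script.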
@@ -1,9 +1,8
0
0
adding changesets
killed!
transaction abort!
rollback completed
00changelog.i
-data
journal.dirstate
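
With '.hg/data' no longer created at init time, the listing of '.hg' taken after the killed pull loses its 'data' entry; the directory simply does not exist yet.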
@@ -1,61 +1,61
#!/bin/sh

# This test tries to exercise the ssh functionality with a dummy script

cat <<'EOF' > dummyssh
#!/bin/sh
# this attempts to deal with relative pathnames
cd `dirname $0`

# check for proper args
if [ $1 != "user@dummy" ] ; then
    exit -1
fi

# check that we're in the right directory
if [ ! -x dummyssh ] ; then
    exit -1
fi

echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
$2
EOF
chmod +x dummyssh

echo "# creating 'local'"
hg init local
echo this > local/foo
hg ci --cwd local -A -m "init" -d "1000000 0"

echo "#test failure"
hg init local

echo "# init+push to remote2"
hg init -e ./dummyssh ssh://user@dummy/remote2
hg incoming -R remote2 local
hg push -R local -e ./dummyssh ssh://user@dummy/remote2

echo "# clone to remote1"
hg clone -e ./dummyssh local ssh://user@dummy/remote1

echo "# init to existing repo"
hg init -e ./dummyssh ssh://user@dummy/remote1

echo "# clone to existing repo"
hg clone -e ./dummyssh local ssh://user@dummy/remote1

echo "# output of dummyssh"
cat dummylog

echo "# comparing repositories"
hg tip -q -R local
hg tip -q -R remote1
hg tip -q -R remote2

echo "# check names for repositories (clashes with URL schemes, special chars)"
for i in bundle file hg http https old-http ssh static-http " " "with space"; do
    echo "# hg init \"$i\""
    hg init "$i"
-   test -d "$i" -a -d "$i/.hg" -a -d "$i/.hg/data" && echo "ok" || echo "failed"
+   test -d "$i" -a -d "$i/.hg" && echo "ok" || echo "failed"
done

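The changed 'test' line at the end is where this changeset lands in the ssh test: after hg init "$i", only '.hg' itself is asserted to exist, since the 'data' directory is now created on demand rather than at init time.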