##// END OF EJS Templates
Merge Benoit's .hg/store support
Matt Mackall -
r3854:4f6db023 merge default
parent child Browse files
Show More
@@ -0,0 +1,14
1 #!/bin/sh
2
3 mkdir t
4 cd t
5 hg init
6 echo a > a
7 hg add a
8 hg commit -m test -d "1000000 0"
9 rm .hg/requires
10 hg tip
11 echo indoor-pool > .hg/requires
12 hg tip
13
14 true
@@ -0,0 +1,2
1 abort: index 00changelog.i unknown format 2!
2 abort: requirement 'indoor-pool' not supported!
@@ -1,257 +1,271
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 from node import *
9 from node import *
10 from repo import *
10 from repo import *
11 from demandload import *
11 from demandload import *
12 from i18n import gettext as _
12 from i18n import gettext as _
13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
15
15
16 def _local(path):
16 def _local(path):
17 return (os.path.isfile(util.drop_scheme('file', path)) and
17 return (os.path.isfile(util.drop_scheme('file', path)) and
18 bundlerepo or localrepo)
18 bundlerepo or localrepo)
19
19
20 schemes = {
20 schemes = {
21 'bundle': bundlerepo,
21 'bundle': bundlerepo,
22 'file': _local,
22 'file': _local,
23 'hg': httprepo,
23 'hg': httprepo,
24 'http': httprepo,
24 'http': httprepo,
25 'https': httprepo,
25 'https': httprepo,
26 'old-http': statichttprepo,
26 'old-http': statichttprepo,
27 'ssh': sshrepo,
27 'ssh': sshrepo,
28 'static-http': statichttprepo,
28 'static-http': statichttprepo,
29 }
29 }
30
30
31 def _lookup(path):
31 def _lookup(path):
32 scheme = 'file'
32 scheme = 'file'
33 if path:
33 if path:
34 c = path.find(':')
34 c = path.find(':')
35 if c > 0:
35 if c > 0:
36 scheme = path[:c]
36 scheme = path[:c]
37 thing = schemes.get(scheme) or schemes['file']
37 thing = schemes.get(scheme) or schemes['file']
38 try:
38 try:
39 return thing(path)
39 return thing(path)
40 except TypeError:
40 except TypeError:
41 return thing
41 return thing
42
42
43 def islocal(repo):
43 def islocal(repo):
44 '''return true if repo or path is local'''
44 '''return true if repo or path is local'''
45 if isinstance(repo, str):
45 if isinstance(repo, str):
46 try:
46 try:
47 return _lookup(repo).islocal(repo)
47 return _lookup(repo).islocal(repo)
48 except AttributeError:
48 except AttributeError:
49 return False
49 return False
50 return repo.local()
50 return repo.local()
51
51
52 repo_setup_hooks = []
52 repo_setup_hooks = []
53
53
54 def repository(ui, path='', create=False):
54 def repository(ui, path='', create=False):
55 """return a repository object for the specified path"""
55 """return a repository object for the specified path"""
56 repo = _lookup(path).instance(ui, path, create)
56 repo = _lookup(path).instance(ui, path, create)
57 for hook in repo_setup_hooks:
57 for hook in repo_setup_hooks:
58 hook(ui, repo)
58 hook(ui, repo)
59 return repo
59 return repo
60
60
61 def defaultdest(source):
61 def defaultdest(source):
62 '''return default destination of clone if none is given'''
62 '''return default destination of clone if none is given'''
63 return os.path.basename(os.path.normpath(source))
63 return os.path.basename(os.path.normpath(source))
64
64
65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
66 stream=False):
66 stream=False):
67 """Make a copy of an existing repository.
67 """Make a copy of an existing repository.
68
68
69 Create a copy of an existing repository in a new directory. The
69 Create a copy of an existing repository in a new directory. The
70 source and destination are URLs, as passed to the repository
70 source and destination are URLs, as passed to the repository
71 function. Returns a pair of repository objects, the source and
71 function. Returns a pair of repository objects, the source and
72 newly created destination.
72 newly created destination.
73
73
74 The location of the source is added to the new repository's
74 The location of the source is added to the new repository's
75 .hg/hgrc file, as the default to be used for future pulls and
75 .hg/hgrc file, as the default to be used for future pulls and
76 pushes.
76 pushes.
77
77
78 If an exception is raised, the partly cloned/updated destination
78 If an exception is raised, the partly cloned/updated destination
79 repository will be deleted.
79 repository will be deleted.
80
80
81 Arguments:
81 Arguments:
82
82
83 source: repository object or URL
83 source: repository object or URL
84
84
85 dest: URL of destination repository to create (defaults to base
85 dest: URL of destination repository to create (defaults to base
86 name of source repository)
86 name of source repository)
87
87
88 pull: always pull from source repository, even in local case
88 pull: always pull from source repository, even in local case
89
89
90 stream: stream raw data uncompressed from repository (fast over
90 stream: stream raw data uncompressed from repository (fast over
91 LAN, slow over WAN)
91 LAN, slow over WAN)
92
92
93 rev: revision to clone up to (implies pull=True)
93 rev: revision to clone up to (implies pull=True)
94
94
95 update: update working directory after clone completes, if
95 update: update working directory after clone completes, if
96 destination is local repository
96 destination is local repository
97 """
97 """
98 if isinstance(source, str):
98 if isinstance(source, str):
99 src_repo = repository(ui, source)
99 src_repo = repository(ui, source)
100 else:
100 else:
101 src_repo = source
101 src_repo = source
102 source = src_repo.url()
102 source = src_repo.url()
103
103
104 if dest is None:
104 if dest is None:
105 dest = defaultdest(source)
105 dest = defaultdest(source)
106 ui.status(_("destination directory: %s\n") % dest)
106 ui.status(_("destination directory: %s\n") % dest)
107
107
108 def localpath(path):
108 def localpath(path):
109 if path.startswith('file://'):
109 if path.startswith('file://'):
110 return path[7:]
110 return path[7:]
111 if path.startswith('file:'):
111 if path.startswith('file:'):
112 return path[5:]
112 return path[5:]
113 return path
113 return path
114
114
115 dest = localpath(dest)
115 dest = localpath(dest)
116 source = localpath(source)
116 source = localpath(source)
117
117
118 if os.path.exists(dest):
118 if os.path.exists(dest):
119 raise util.Abort(_("destination '%s' already exists") % dest)
119 raise util.Abort(_("destination '%s' already exists") % dest)
120
120
121 class DirCleanup(object):
121 class DirCleanup(object):
122 def __init__(self, dir_):
122 def __init__(self, dir_):
123 self.rmtree = shutil.rmtree
123 self.rmtree = shutil.rmtree
124 self.dir_ = dir_
124 self.dir_ = dir_
125 def close(self):
125 def close(self):
126 self.dir_ = None
126 self.dir_ = None
127 def __del__(self):
127 def __del__(self):
128 if self.dir_:
128 if self.dir_:
129 self.rmtree(self.dir_, True)
129 self.rmtree(self.dir_, True)
130
130
131 dest_repo = repository(ui, dest, create=True)
132
133 dir_cleanup = None
131 dir_cleanup = None
134 if dest_repo.local():
132 if islocal(dest):
135 dir_cleanup = DirCleanup(os.path.realpath(dest_repo.root))
133 dir_cleanup = DirCleanup(dest)
136
134
137 abspath = source
135 abspath = source
138 copy = False
136 copy = False
139 if src_repo.local() and dest_repo.local():
137 if src_repo.local() and islocal(dest):
140 abspath = os.path.abspath(source)
138 abspath = os.path.abspath(source)
141 copy = not pull and not rev
139 copy = not pull and not rev
142
140
143 src_lock, dest_lock = None, None
141 src_lock, dest_lock = None, None
144 if copy:
142 if copy:
145 try:
143 try:
146 # we use a lock here because if we race with commit, we
144 # we use a lock here because if we race with commit, we
147 # can end up with extra data in the cloned revlogs that's
145 # can end up with extra data in the cloned revlogs that's
148 # not pointed to by changesets, thus causing verify to
146 # not pointed to by changesets, thus causing verify to
149 # fail
147 # fail
150 src_lock = src_repo.lock()
148 src_lock = src_repo.lock()
151 except lock.LockException:
149 except lock.LockException:
152 copy = False
150 copy = False
153
151
154 if copy:
152 if copy:
155 # we lock here to avoid premature writing to the target
153 def force_copy(src, dst):
154 try:
155 util.copyfiles(src, dst)
156 except OSError, inst:
157 if inst.errno != errno.ENOENT:
158 raise
159
156 src_store = os.path.realpath(src_repo.spath)
160 src_store = os.path.realpath(src_repo.spath)
157 dest_store = os.path.realpath(dest_repo.spath)
161 if not os.path.exists(dest):
162 os.mkdir(dest)
163 dest_path = os.path.realpath(os.path.join(dest, ".hg"))
164 os.mkdir(dest_path)
165 if src_repo.spath != src_repo.path:
166 dest_store = os.path.join(dest_path, "store")
167 os.mkdir(dest_store)
168 else:
169 dest_store = dest_path
170 # copy the requires file
171 force_copy(src_repo.join("requires"),
172 os.path.join(dest_path, "requires"))
173 # we lock here to avoid premature writing to the target
158 dest_lock = lock.lock(os.path.join(dest_store, "lock"))
174 dest_lock = lock.lock(os.path.join(dest_store, "lock"))
159
175
160 files = ("data",
176 files = ("data",
161 "00manifest.d", "00manifest.i",
177 "00manifest.d", "00manifest.i",
162 "00changelog.d", "00changelog.i")
178 "00changelog.d", "00changelog.i")
163 for f in files:
179 for f in files:
164 src = os.path.join(src_store, f)
180 src = os.path.join(src_store, f)
165 dst = os.path.join(dest_store, f)
181 dst = os.path.join(dest_store, f)
166 try:
182 force_copy(src, dst)
167 util.copyfiles(src, dst)
168 except OSError, inst:
169 if inst.errno != errno.ENOENT:
170 raise
171
183
172 # we need to re-init the repo after manually copying the data
184 # we need to re-init the repo after manually copying the data
173 # into it
185 # into it
174 dest_repo = repository(ui, dest)
186 dest_repo = repository(ui, dest)
175
187
176 else:
188 else:
189 dest_repo = repository(ui, dest, create=True)
190
177 revs = None
191 revs = None
178 if rev:
192 if rev:
179 if 'lookup' not in src_repo.capabilities:
193 if 'lookup' not in src_repo.capabilities:
180 raise util.Abort(_("src repository does not support revision "
194 raise util.Abort(_("src repository does not support revision "
181 "lookup and so doesn't support clone by "
195 "lookup and so doesn't support clone by "
182 "revision"))
196 "revision"))
183 revs = [src_repo.lookup(r) for r in rev]
197 revs = [src_repo.lookup(r) for r in rev]
184
198
185 if dest_repo.local():
199 if dest_repo.local():
186 dest_repo.clone(src_repo, heads=revs, stream=stream)
200 dest_repo.clone(src_repo, heads=revs, stream=stream)
187 elif src_repo.local():
201 elif src_repo.local():
188 src_repo.push(dest_repo, revs=revs)
202 src_repo.push(dest_repo, revs=revs)
189 else:
203 else:
190 raise util.Abort(_("clone from remote to remote not supported"))
204 raise util.Abort(_("clone from remote to remote not supported"))
191
205
192 if src_lock:
206 if src_lock:
193 src_lock.release()
207 src_lock.release()
194
208
195 if dest_repo.local():
209 if dest_repo.local():
196 fp = dest_repo.opener("hgrc", "w", text=True)
210 fp = dest_repo.opener("hgrc", "w", text=True)
197 fp.write("[paths]\n")
211 fp.write("[paths]\n")
198 fp.write("default = %s\n" % abspath)
212 fp.write("default = %s\n" % abspath)
199 fp.close()
213 fp.close()
200
214
201 if dest_lock:
215 if dest_lock:
202 dest_lock.release()
216 dest_lock.release()
203
217
204 if update:
218 if update:
205 _update(dest_repo, dest_repo.changelog.tip())
219 _update(dest_repo, dest_repo.changelog.tip())
206 if dir_cleanup:
220 if dir_cleanup:
207 dir_cleanup.close()
221 dir_cleanup.close()
208
222
209 return src_repo, dest_repo
223 return src_repo, dest_repo
210
224
211 def _showstats(repo, stats):
225 def _showstats(repo, stats):
212 stats = ((stats[0], _("updated")),
226 stats = ((stats[0], _("updated")),
213 (stats[1], _("merged")),
227 (stats[1], _("merged")),
214 (stats[2], _("removed")),
228 (stats[2], _("removed")),
215 (stats[3], _("unresolved")))
229 (stats[3], _("unresolved")))
216 note = ", ".join([_("%d files %s") % s for s in stats])
230 note = ", ".join([_("%d files %s") % s for s in stats])
217 repo.ui.status("%s\n" % note)
231 repo.ui.status("%s\n" % note)
218
232
219 def _update(repo, node): return update(repo, node)
233 def _update(repo, node): return update(repo, node)
220
234
221 def update(repo, node):
235 def update(repo, node):
222 """update the working directory to node, merging linear changes"""
236 """update the working directory to node, merging linear changes"""
223 stats = _merge.update(repo, node, False, False, None, None)
237 stats = _merge.update(repo, node, False, False, None, None)
224 _showstats(repo, stats)
238 _showstats(repo, stats)
225 if stats[3]:
239 if stats[3]:
226 repo.ui.status(_("There are unresolved merges with"
240 repo.ui.status(_("There are unresolved merges with"
227 " locally modified files.\n"))
241 " locally modified files.\n"))
228 return stats[3]
242 return stats[3]
229
243
230 def clean(repo, node, wlock=None, show_stats=True):
244 def clean(repo, node, wlock=None, show_stats=True):
231 """forcibly switch the working directory to node, clobbering changes"""
245 """forcibly switch the working directory to node, clobbering changes"""
232 stats = _merge.update(repo, node, False, True, None, wlock)
246 stats = _merge.update(repo, node, False, True, None, wlock)
233 if show_stats: _showstats(repo, stats)
247 if show_stats: _showstats(repo, stats)
234 return stats[3]
248 return stats[3]
235
249
236 def merge(repo, node, force=None, remind=True, wlock=None):
250 def merge(repo, node, force=None, remind=True, wlock=None):
237 """branch merge with node, resolving changes"""
251 """branch merge with node, resolving changes"""
238 stats = _merge.update(repo, node, True, force, False, wlock)
252 stats = _merge.update(repo, node, True, force, False, wlock)
239 _showstats(repo, stats)
253 _showstats(repo, stats)
240 if stats[3]:
254 if stats[3]:
241 pl = repo.parents()
255 pl = repo.parents()
242 repo.ui.status(_("There are unresolved merges,"
256 repo.ui.status(_("There are unresolved merges,"
243 " you can redo the full merge using:\n"
257 " you can redo the full merge using:\n"
244 " hg update -C %s\n"
258 " hg update -C %s\n"
245 " hg merge %s\n")
259 " hg merge %s\n")
246 % (pl[0].rev(), pl[1].rev()))
260 % (pl[0].rev(), pl[1].rev()))
247 elif remind:
261 elif remind:
248 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
262 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
249 return stats[3]
263 return stats[3]
250
264
251 def revert(repo, node, choose, wlock):
265 def revert(repo, node, choose, wlock):
252 """revert changes to revision in node without updating dirstate"""
266 """revert changes to revision in node without updating dirstate"""
253 return _merge.update(repo, node, False, True, choose, wlock)[3]
267 return _merge.update(repo, node, False, True, choose, wlock)[3]
254
268
255 def verify(repo):
269 def verify(repo):
256 """verify the consistency of a repository"""
270 """verify the consistency of a repository"""
257 return _verify.verify(repo)
271 return _verify.verify(repo)
@@ -1,61 +1,63
1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os, mimetypes
9 import os, mimetypes
10 import os.path
10 import os.path
11
11
12 def get_mtime(repo_path):
12 def get_mtime(repo_path):
13 hg_path = os.path.join(repo_path, ".hg")
13 store_path = os.path.join(repo_path, ".hg")
14 cl_path = os.path.join(hg_path, "00changelog.i")
14 if not os.path.isdir(os.path.join(store_path, "data")):
15 if os.path.exists(os.path.join(cl_path)):
15 store_path = os.path.join(store_path, "store")
16 cl_path = os.path.join(store_path, "00changelog.i")
17 if os.path.exists(cl_path):
16 return os.stat(cl_path).st_mtime
18 return os.stat(cl_path).st_mtime
17 else:
19 else:
18 return os.stat(hg_path).st_mtime
20 return os.stat(store_path).st_mtime
19
21
20 def staticfile(directory, fname, req):
22 def staticfile(directory, fname, req):
21 """return a file inside directory with guessed content-type header
23 """return a file inside directory with guessed content-type header
22
24
23 fname always uses '/' as directory separator and isn't allowed to
25 fname always uses '/' as directory separator and isn't allowed to
24 contain unusual path components.
26 contain unusual path components.
25 Content-type is guessed using the mimetypes module.
27 Content-type is guessed using the mimetypes module.
26 Return an empty string if fname is illegal or file not found.
28 Return an empty string if fname is illegal or file not found.
27
29
28 """
30 """
29 parts = fname.split('/')
31 parts = fname.split('/')
30 path = directory
32 path = directory
31 for part in parts:
33 for part in parts:
32 if (part in ('', os.curdir, os.pardir) or
34 if (part in ('', os.curdir, os.pardir) or
33 os.sep in part or os.altsep is not None and os.altsep in part):
35 os.sep in part or os.altsep is not None and os.altsep in part):
34 return ""
36 return ""
35 path = os.path.join(path, part)
37 path = os.path.join(path, part)
36 try:
38 try:
37 os.stat(path)
39 os.stat(path)
38 ct = mimetypes.guess_type(path)[0] or "text/plain"
40 ct = mimetypes.guess_type(path)[0] or "text/plain"
39 req.header([('Content-type', ct),
41 req.header([('Content-type', ct),
40 ('Content-length', os.path.getsize(path))])
42 ('Content-length', os.path.getsize(path))])
41 return file(path, 'rb').read()
43 return file(path, 'rb').read()
42 except (TypeError, OSError):
44 except (TypeError, OSError):
43 # illegal fname or unreadable file
45 # illegal fname or unreadable file
44 return ""
46 return ""
45
47
46 def style_map(templatepath, style):
48 def style_map(templatepath, style):
47 """Return path to mapfile for a given style.
49 """Return path to mapfile for a given style.
48
50
49 Searches mapfile in the following locations:
51 Searches mapfile in the following locations:
50 1. templatepath/style/map
52 1. templatepath/style/map
51 2. templatepath/map-style
53 2. templatepath/map-style
52 3. templatepath/map
54 3. templatepath/map
53 """
55 """
54 locations = style and [os.path.join(style, "map"), "map-"+style] or []
56 locations = style and [os.path.join(style, "map"), "map-"+style] or []
55 locations.append("map")
57 locations.append("map")
56 for location in locations:
58 for location in locations:
57 mapfile = os.path.join(templatepath, location)
59 mapfile = os.path.join(templatepath, location)
58 if os.path.isfile(mapfile):
60 if os.path.isfile(mapfile):
59 return mapfile
61 return mapfile
60 raise RuntimeError("No hgweb templates found in %r" % templatepath)
62 raise RuntimeError("No hgweb templates found in %r" % templatepath)
61
63
@@ -1,1935 +1,1964
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19 supported = ('revlogv1', 'store')
19
20
20 def __del__(self):
21 def __del__(self):
21 self.transhandle = None
22 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
23 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
24 repo.repository.__init__(self)
24 if not path:
25 if not path:
25 p = os.getcwd()
26 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
28 oldp = p
28 p = os.path.dirname(p)
29 p = os.path.dirname(p)
29 if p == oldp:
30 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
31 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
32 " here (.hg not found)"))
32 path = p
33 path = p
34
33 self.path = os.path.join(path, ".hg")
35 self.path = os.path.join(path, ".hg")
34 self.spath = self.path
36 self.root = os.path.realpath(path)
37 self.origroot = path
38 self.opener = util.opener(self.path)
39 self.wopener = util.opener(self.root)
35
40
36 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
37 if create:
42 if create:
38 if not os.path.exists(path):
43 if not os.path.exists(path):
39 os.mkdir(path)
44 os.mkdir(path)
40 os.mkdir(self.path)
45 os.mkdir(self.path)
41 if self.spath != self.path:
46 os.mkdir(os.path.join(self.path, "store"))
42 os.mkdir(self.spath)
47 requirements = ("revlogv1", "store")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
50 reqfile.write("%s\n" % r)
51 reqfile.close()
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write('\0\0\0\2')
43 else:
54 else:
44 raise repo.RepoError(_("repository %s not found") % path)
55 raise repo.RepoError(_("repository %s not found") % path)
45 elif create:
56 elif create:
46 raise repo.RepoError(_("repository %s already exists") % path)
57 raise repo.RepoError(_("repository %s already exists") % path)
58 else:
59 # find requirements
60 try:
61 requirements = self.opener("requires").read().splitlines()
62 except IOError, inst:
63 if inst.errno != errno.ENOENT:
64 raise
65 requirements = []
66 # check them
67 for r in requirements:
68 if r not in self.supported:
69 raise repo.RepoError(_("requirement '%s' not supported") % r)
47
70
48 self.root = os.path.realpath(path)
71 # setup store
49 self.origroot = path
72 if "store" in requirements:
73 self.encodefn = util.encodefilename
74 self.decodefn = util.decodefilename
75 self.spath = os.path.join(self.path, "store")
76 else:
77 self.encodefn = lambda x: x
78 self.decodefn = lambda x: x
79 self.spath = self.path
80 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
81
50 self.ui = ui.ui(parentui=parentui)
82 self.ui = ui.ui(parentui=parentui)
51 self.opener = util.opener(self.path)
52 self.sopener = util.opener(self.spath)
53 self.wopener = util.opener(self.root)
54
55 try:
83 try:
56 self.ui.readconfig(self.join("hgrc"), self.root)
84 self.ui.readconfig(self.join("hgrc"), self.root)
57 except IOError:
85 except IOError:
58 pass
86 pass
59
87
60 v = self.ui.configrevlog()
88 v = self.ui.configrevlog()
61 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
89 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
62 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
90 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
63 fl = v.get('flags', None)
91 fl = v.get('flags', None)
64 flags = 0
92 flags = 0
65 if fl != None:
93 if fl != None:
66 for x in fl.split():
94 for x in fl.split():
67 flags |= revlog.flagstr(x)
95 flags |= revlog.flagstr(x)
68 elif self.revlogv1:
96 elif self.revlogv1:
69 flags = revlog.REVLOG_DEFAULT_FLAGS
97 flags = revlog.REVLOG_DEFAULT_FLAGS
70
98
71 v = self.revlogversion | flags
99 v = self.revlogversion | flags
72 self.manifest = manifest.manifest(self.sopener, v)
100 self.manifest = manifest.manifest(self.sopener, v)
73 self.changelog = changelog.changelog(self.sopener, v)
101 self.changelog = changelog.changelog(self.sopener, v)
74
102
75 fallback = self.ui.config('ui', 'fallbackencoding')
103 fallback = self.ui.config('ui', 'fallbackencoding')
76 if fallback:
104 if fallback:
77 util._fallbackencoding = fallback
105 util._fallbackencoding = fallback
78
106
79 # the changelog might not have the inline index flag
107 # the changelog might not have the inline index flag
80 # on. If the format of the changelog is the same as found in
108 # on. If the format of the changelog is the same as found in
81 # .hgrc, apply any flags found in the .hgrc as well.
109 # .hgrc, apply any flags found in the .hgrc as well.
82 # Otherwise, just version from the changelog
110 # Otherwise, just version from the changelog
83 v = self.changelog.version
111 v = self.changelog.version
84 if v == self.revlogversion:
112 if v == self.revlogversion:
85 v |= flags
113 v |= flags
86 self.revlogversion = v
114 self.revlogversion = v
87
115
88 self.tagscache = None
116 self.tagscache = None
89 self.branchcache = None
117 self.branchcache = None
90 self.nodetagscache = None
118 self.nodetagscache = None
91 self.encodepats = None
119 self.encodepats = None
92 self.decodepats = None
120 self.decodepats = None
93 self.transhandle = None
121 self.transhandle = None
94
122
95 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
123 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
96
124
97 def url(self):
125 def url(self):
98 return 'file:' + self.root
126 return 'file:' + self.root
99
127
100 def hook(self, name, throw=False, **args):
128 def hook(self, name, throw=False, **args):
101 def callhook(hname, funcname):
129 def callhook(hname, funcname):
102 '''call python hook. hook is callable object, looked up as
130 '''call python hook. hook is callable object, looked up as
103 name in python module. if callable returns "true", hook
131 name in python module. if callable returns "true", hook
104 fails, else passes. if hook raises exception, treated as
132 fails, else passes. if hook raises exception, treated as
105 hook failure. exception propagates if throw is "true".
133 hook failure. exception propagates if throw is "true".
106
134
107 reason for "true" meaning "hook failed" is so that
135 reason for "true" meaning "hook failed" is so that
108 unmodified commands (e.g. mercurial.commands.update) can
136 unmodified commands (e.g. mercurial.commands.update) can
109 be run as hooks without wrappers to convert return values.'''
137 be run as hooks without wrappers to convert return values.'''
110
138
111 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
139 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
112 d = funcname.rfind('.')
140 d = funcname.rfind('.')
113 if d == -1:
141 if d == -1:
114 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
142 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
115 % (hname, funcname))
143 % (hname, funcname))
116 modname = funcname[:d]
144 modname = funcname[:d]
117 try:
145 try:
118 obj = __import__(modname)
146 obj = __import__(modname)
119 except ImportError:
147 except ImportError:
120 try:
148 try:
121 # extensions are loaded with hgext_ prefix
149 # extensions are loaded with hgext_ prefix
122 obj = __import__("hgext_%s" % modname)
150 obj = __import__("hgext_%s" % modname)
123 except ImportError:
151 except ImportError:
124 raise util.Abort(_('%s hook is invalid '
152 raise util.Abort(_('%s hook is invalid '
125 '(import of "%s" failed)') %
153 '(import of "%s" failed)') %
126 (hname, modname))
154 (hname, modname))
127 try:
155 try:
128 for p in funcname.split('.')[1:]:
156 for p in funcname.split('.')[1:]:
129 obj = getattr(obj, p)
157 obj = getattr(obj, p)
130 except AttributeError, err:
158 except AttributeError, err:
131 raise util.Abort(_('%s hook is invalid '
159 raise util.Abort(_('%s hook is invalid '
132 '("%s" is not defined)') %
160 '("%s" is not defined)') %
133 (hname, funcname))
161 (hname, funcname))
134 if not callable(obj):
162 if not callable(obj):
135 raise util.Abort(_('%s hook is invalid '
163 raise util.Abort(_('%s hook is invalid '
136 '("%s" is not callable)') %
164 '("%s" is not callable)') %
137 (hname, funcname))
165 (hname, funcname))
138 try:
166 try:
139 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
167 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
140 except (KeyboardInterrupt, util.SignalInterrupt):
168 except (KeyboardInterrupt, util.SignalInterrupt):
141 raise
169 raise
142 except Exception, exc:
170 except Exception, exc:
143 if isinstance(exc, util.Abort):
171 if isinstance(exc, util.Abort):
144 self.ui.warn(_('error: %s hook failed: %s\n') %
172 self.ui.warn(_('error: %s hook failed: %s\n') %
145 (hname, exc.args[0]))
173 (hname, exc.args[0]))
146 else:
174 else:
147 self.ui.warn(_('error: %s hook raised an exception: '
175 self.ui.warn(_('error: %s hook raised an exception: '
148 '%s\n') % (hname, exc))
176 '%s\n') % (hname, exc))
149 if throw:
177 if throw:
150 raise
178 raise
151 self.ui.print_exc()
179 self.ui.print_exc()
152 return True
180 return True
153 if r:
181 if r:
154 if throw:
182 if throw:
155 raise util.Abort(_('%s hook failed') % hname)
183 raise util.Abort(_('%s hook failed') % hname)
156 self.ui.warn(_('warning: %s hook failed\n') % hname)
184 self.ui.warn(_('warning: %s hook failed\n') % hname)
157 return r
185 return r
158
186
159 def runhook(name, cmd):
187 def runhook(name, cmd):
160 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
188 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
161 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
189 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
162 r = util.system(cmd, environ=env, cwd=self.root)
190 r = util.system(cmd, environ=env, cwd=self.root)
163 if r:
191 if r:
164 desc, r = util.explain_exit(r)
192 desc, r = util.explain_exit(r)
165 if throw:
193 if throw:
166 raise util.Abort(_('%s hook %s') % (name, desc))
194 raise util.Abort(_('%s hook %s') % (name, desc))
167 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
195 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
168 return r
196 return r
169
197
170 r = False
198 r = False
171 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
199 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
172 if hname.split(".", 1)[0] == name and cmd]
200 if hname.split(".", 1)[0] == name and cmd]
173 hooks.sort()
201 hooks.sort()
174 for hname, cmd in hooks:
202 for hname, cmd in hooks:
175 if cmd.startswith('python:'):
203 if cmd.startswith('python:'):
176 r = callhook(hname, cmd[7:].strip()) or r
204 r = callhook(hname, cmd[7:].strip()) or r
177 else:
205 else:
178 r = runhook(hname, cmd) or r
206 r = runhook(hname, cmd) or r
179 return r
207 return r
180
208
    # characters forbidden in tag names: ':' conflicts with revision
    # syntax, CR/LF would corrupt the line-oriented .hgtags format
    tag_disallowed = ':\r\n'
182
210
183 def tag(self, name, node, message, local, user, date):
211 def tag(self, name, node, message, local, user, date):
184 '''tag a revision with a symbolic name.
212 '''tag a revision with a symbolic name.
185
213
186 if local is True, the tag is stored in a per-repository file.
214 if local is True, the tag is stored in a per-repository file.
187 otherwise, it is stored in the .hgtags file, and a new
215 otherwise, it is stored in the .hgtags file, and a new
188 changeset is committed with the change.
216 changeset is committed with the change.
189
217
190 keyword arguments:
218 keyword arguments:
191
219
192 local: whether to store tag in non-version-controlled file
220 local: whether to store tag in non-version-controlled file
193 (default False)
221 (default False)
194
222
195 message: commit message to use if committing
223 message: commit message to use if committing
196
224
197 user: name of user to use if committing
225 user: name of user to use if committing
198
226
199 date: date tuple to use if committing'''
227 date: date tuple to use if committing'''
200
228
201 for c in self.tag_disallowed:
229 for c in self.tag_disallowed:
202 if c in name:
230 if c in name:
203 raise util.Abort(_('%r cannot be used in a tag name') % c)
231 raise util.Abort(_('%r cannot be used in a tag name') % c)
204
232
205 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
233 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
206
234
207 if local:
235 if local:
208 # local tags are stored in the current charset
236 # local tags are stored in the current charset
209 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
237 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
210 self.hook('tag', node=hex(node), tag=name, local=local)
238 self.hook('tag', node=hex(node), tag=name, local=local)
211 return
239 return
212
240
213 for x in self.status()[:5]:
241 for x in self.status()[:5]:
214 if '.hgtags' in x:
242 if '.hgtags' in x:
215 raise util.Abort(_('working copy of .hgtags is changed '
243 raise util.Abort(_('working copy of .hgtags is changed '
216 '(please commit .hgtags manually)'))
244 '(please commit .hgtags manually)'))
217
245
218 # committed tags are stored in UTF-8
246 # committed tags are stored in UTF-8
219 line = '%s %s\n' % (hex(node), util.fromlocal(name))
247 line = '%s %s\n' % (hex(node), util.fromlocal(name))
220 self.wfile('.hgtags', 'ab').write(line)
248 self.wfile('.hgtags', 'ab').write(line)
221 if self.dirstate.state('.hgtags') == '?':
249 if self.dirstate.state('.hgtags') == '?':
222 self.add(['.hgtags'])
250 self.add(['.hgtags'])
223
251
224 self.commit(['.hgtags'], message, user, date)
252 self.commit(['.hgtags'], message, user, date)
225 self.hook('tag', node=hex(node), tag=name, local=local)
253 self.hook('tag', node=hex(node), tag=name, local=local)
226
254
227 def tags(self):
255 def tags(self):
228 '''return a mapping of tag to node'''
256 '''return a mapping of tag to node'''
229 if not self.tagscache:
257 if not self.tagscache:
230 self.tagscache = {}
258 self.tagscache = {}
231
259
232 def parsetag(line, context):
260 def parsetag(line, context):
233 if not line:
261 if not line:
234 return
262 return
235 s = l.split(" ", 1)
263 s = l.split(" ", 1)
236 if len(s) != 2:
264 if len(s) != 2:
237 self.ui.warn(_("%s: cannot parse entry\n") % context)
265 self.ui.warn(_("%s: cannot parse entry\n") % context)
238 return
266 return
239 node, key = s
267 node, key = s
240 key = util.tolocal(key.strip()) # stored in UTF-8
268 key = util.tolocal(key.strip()) # stored in UTF-8
241 try:
269 try:
242 bin_n = bin(node)
270 bin_n = bin(node)
243 except TypeError:
271 except TypeError:
244 self.ui.warn(_("%s: node '%s' is not well formed\n") %
272 self.ui.warn(_("%s: node '%s' is not well formed\n") %
245 (context, node))
273 (context, node))
246 return
274 return
247 if bin_n not in self.changelog.nodemap:
275 if bin_n not in self.changelog.nodemap:
248 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
276 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
249 (context, key))
277 (context, key))
250 return
278 return
251 self.tagscache[key] = bin_n
279 self.tagscache[key] = bin_n
252
280
253 # read the tags file from each head, ending with the tip,
281 # read the tags file from each head, ending with the tip,
254 # and add each tag found to the map, with "newer" ones
282 # and add each tag found to the map, with "newer" ones
255 # taking precedence
283 # taking precedence
256 f = None
284 f = None
257 for rev, node, fnode in self._hgtagsnodes():
285 for rev, node, fnode in self._hgtagsnodes():
258 f = (f and f.filectx(fnode) or
286 f = (f and f.filectx(fnode) or
259 self.filectx('.hgtags', fileid=fnode))
287 self.filectx('.hgtags', fileid=fnode))
260 count = 0
288 count = 0
261 for l in f.data().splitlines():
289 for l in f.data().splitlines():
262 count += 1
290 count += 1
263 parsetag(l, _("%s, line %d") % (str(f), count))
291 parsetag(l, _("%s, line %d") % (str(f), count))
264
292
265 try:
293 try:
266 f = self.opener("localtags")
294 f = self.opener("localtags")
267 count = 0
295 count = 0
268 for l in f:
296 for l in f:
269 # localtags are stored in the local character set
297 # localtags are stored in the local character set
270 # while the internal tag table is stored in UTF-8
298 # while the internal tag table is stored in UTF-8
271 l = util.fromlocal(l)
299 l = util.fromlocal(l)
272 count += 1
300 count += 1
273 parsetag(l, _("localtags, line %d") % count)
301 parsetag(l, _("localtags, line %d") % count)
274 except IOError:
302 except IOError:
275 pass
303 pass
276
304
277 self.tagscache['tip'] = self.changelog.tip()
305 self.tagscache['tip'] = self.changelog.tip()
278
306
279 return self.tagscache
307 return self.tagscache
280
308
281 def _hgtagsnodes(self):
309 def _hgtagsnodes(self):
282 heads = self.heads()
310 heads = self.heads()
283 heads.reverse()
311 heads.reverse()
284 last = {}
312 last = {}
285 ret = []
313 ret = []
286 for node in heads:
314 for node in heads:
287 c = self.changectx(node)
315 c = self.changectx(node)
288 rev = c.rev()
316 rev = c.rev()
289 try:
317 try:
290 fnode = c.filenode('.hgtags')
318 fnode = c.filenode('.hgtags')
291 except repo.LookupError:
319 except repo.LookupError:
292 continue
320 continue
293 ret.append((rev, node, fnode))
321 ret.append((rev, node, fnode))
294 if fnode in last:
322 if fnode in last:
295 ret[last[fnode]] = None
323 ret[last[fnode]] = None
296 last[fnode] = len(ret) - 1
324 last[fnode] = len(ret) - 1
297 return [item for item in ret if item]
325 return [item for item in ret if item]
298
326
299 def tagslist(self):
327 def tagslist(self):
300 '''return a list of tags ordered by revision'''
328 '''return a list of tags ordered by revision'''
301 l = []
329 l = []
302 for t, n in self.tags().items():
330 for t, n in self.tags().items():
303 try:
331 try:
304 r = self.changelog.rev(n)
332 r = self.changelog.rev(n)
305 except:
333 except:
306 r = -2 # sort to the beginning of the list if unknown
334 r = -2 # sort to the beginning of the list if unknown
307 l.append((r, t, n))
335 l.append((r, t, n))
308 l.sort()
336 l.sort()
309 return [(t, n) for r, t, n in l]
337 return [(t, n) for r, t, n in l]
310
338
311 def nodetags(self, node):
339 def nodetags(self, node):
312 '''return the tags associated with a node'''
340 '''return the tags associated with a node'''
313 if not self.nodetagscache:
341 if not self.nodetagscache:
314 self.nodetagscache = {}
342 self.nodetagscache = {}
315 for t, n in self.tags().items():
343 for t, n in self.tags().items():
316 self.nodetagscache.setdefault(n, []).append(t)
344 self.nodetagscache.setdefault(n, []).append(t)
317 return self.nodetagscache.get(node, [])
345 return self.nodetagscache.get(node, [])
318
346
319 def _branchtags(self):
347 def _branchtags(self):
320 partial, last, lrev = self._readbranchcache()
348 partial, last, lrev = self._readbranchcache()
321
349
322 tiprev = self.changelog.count() - 1
350 tiprev = self.changelog.count() - 1
323 if lrev != tiprev:
351 if lrev != tiprev:
324 self._updatebranchcache(partial, lrev+1, tiprev+1)
352 self._updatebranchcache(partial, lrev+1, tiprev+1)
325 self._writebranchcache(partial, self.changelog.tip(), tiprev)
353 self._writebranchcache(partial, self.changelog.tip(), tiprev)
326
354
327 return partial
355 return partial
328
356
329 def branchtags(self):
357 def branchtags(self):
330 if self.branchcache is not None:
358 if self.branchcache is not None:
331 return self.branchcache
359 return self.branchcache
332
360
333 self.branchcache = {} # avoid recursion in changectx
361 self.branchcache = {} # avoid recursion in changectx
334 partial = self._branchtags()
362 partial = self._branchtags()
335
363
336 # the branch cache is stored on disk as UTF-8, but in the local
364 # the branch cache is stored on disk as UTF-8, but in the local
337 # charset internally
365 # charset internally
338 for k, v in partial.items():
366 for k, v in partial.items():
339 self.branchcache[util.tolocal(k)] = v
367 self.branchcache[util.tolocal(k)] = v
340 return self.branchcache
368 return self.branchcache
341
369
    def _readbranchcache(self):
        '''read the on-disk branch cache.

        returns (partial, last, lrev): partial maps branch name (UTF-8)
        to binary node; last/lrev identify the changeset the cache was
        computed against.  Any read or validation failure degrades to
        an empty map anchored at the null revision.'''
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # header line: "<hex tip> <tiprev>" records cache validity
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "<hex node> <branch label>"
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            # never swallow interrupts
            raise
        except Exception, inst:
            # any other failure just means "no usable cache"
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
365
393
366 def _writebranchcache(self, branches, tip, tiprev):
394 def _writebranchcache(self, branches, tip, tiprev):
367 try:
395 try:
368 f = self.opener("branches.cache", "w")
396 f = self.opener("branches.cache", "w")
369 f.write("%s %s\n" % (hex(tip), tiprev))
397 f.write("%s %s\n" % (hex(tip), tiprev))
370 for label, node in branches.iteritems():
398 for label, node in branches.iteritems():
371 f.write("%s %s\n" % (hex(node), label))
399 f.write("%s %s\n" % (hex(node), label))
372 except IOError:
400 except IOError:
373 pass
401 pass
374
402
375 def _updatebranchcache(self, partial, start, end):
403 def _updatebranchcache(self, partial, start, end):
376 for r in xrange(start, end):
404 for r in xrange(start, end):
377 c = self.changectx(r)
405 c = self.changectx(r)
378 b = c.branch()
406 b = c.branch()
379 if b:
407 if b:
380 partial[b] = c.node()
408 partial[b] = c.node()
381
409
382 def lookup(self, key):
410 def lookup(self, key):
383 if key == '.':
411 if key == '.':
384 key = self.dirstate.parents()[0]
412 key = self.dirstate.parents()[0]
385 if key == nullid:
413 if key == nullid:
386 raise repo.RepoError(_("no revision checked out"))
414 raise repo.RepoError(_("no revision checked out"))
387 elif key == 'null':
415 elif key == 'null':
388 return nullid
416 return nullid
389 n = self.changelog._match(key)
417 n = self.changelog._match(key)
390 if n:
418 if n:
391 return n
419 return n
392 if key in self.tags():
420 if key in self.tags():
393 return self.tags()[key]
421 return self.tags()[key]
394 if key in self.branchtags():
422 if key in self.branchtags():
395 return self.branchtags()[key]
423 return self.branchtags()[key]
396 n = self.changelog._partialmatch(key)
424 n = self.changelog._partialmatch(key)
397 if n:
425 if n:
398 return n
426 return n
399 raise repo.RepoError(_("unknown revision '%s'") % key)
427 raise repo.RepoError(_("unknown revision '%s'") % key)
400
428
401 def dev(self):
429 def dev(self):
402 return os.lstat(self.path).st_dev
430 return os.lstat(self.path).st_dev
403
431
404 def local(self):
432 def local(self):
405 return True
433 return True
406
434
407 def join(self, f):
435 def join(self, f):
408 return os.path.join(self.path, f)
436 return os.path.join(self.path, f)
409
437
410 def sjoin(self, f):
438 def sjoin(self, f):
439 f = self.encodefn(f)
411 return os.path.join(self.spath, f)
440 return os.path.join(self.spath, f)
412
441
413 def wjoin(self, f):
442 def wjoin(self, f):
414 return os.path.join(self.root, f)
443 return os.path.join(self.root, f)
415
444
416 def file(self, f):
445 def file(self, f):
417 if f[0] == '/':
446 if f[0] == '/':
418 f = f[1:]
447 f = f[1:]
419 return filelog.filelog(self.sopener, f, self.revlogversion)
448 return filelog.filelog(self.sopener, f, self.revlogversion)
420
449
421 def changectx(self, changeid=None):
450 def changectx(self, changeid=None):
422 return context.changectx(self, changeid)
451 return context.changectx(self, changeid)
423
452
424 def workingctx(self):
453 def workingctx(self):
425 return context.workingctx(self)
454 return context.workingctx(self)
426
455
427 def parents(self, changeid=None):
456 def parents(self, changeid=None):
428 '''
457 '''
429 get list of changectxs for parents of changeid or working directory
458 get list of changectxs for parents of changeid or working directory
430 '''
459 '''
431 if changeid is None:
460 if changeid is None:
432 pl = self.dirstate.parents()
461 pl = self.dirstate.parents()
433 else:
462 else:
434 n = self.changelog.lookup(changeid)
463 n = self.changelog.lookup(changeid)
435 pl = self.changelog.parents(n)
464 pl = self.changelog.parents(n)
436 if pl[1] == nullid:
465 if pl[1] == nullid:
437 return [self.changectx(pl[0])]
466 return [self.changectx(pl[0])]
438 return [self.changectx(pl[0]), self.changectx(pl[1])]
467 return [self.changectx(pl[0]), self.changectx(pl[1])]
439
468
440 def filectx(self, path, changeid=None, fileid=None):
469 def filectx(self, path, changeid=None, fileid=None):
441 """changeid can be a changeset revision, node, or tag.
470 """changeid can be a changeset revision, node, or tag.
442 fileid can be a file revision or node."""
471 fileid can be a file revision or node."""
443 return context.filectx(self, path, changeid, fileid)
472 return context.filectx(self, path, changeid, fileid)
444
473
445 def getcwd(self):
474 def getcwd(self):
446 return self.dirstate.getcwd()
475 return self.dirstate.getcwd()
447
476
448 def wfile(self, f, mode='r'):
477 def wfile(self, f, mode='r'):
449 return self.wopener(f, mode)
478 return self.wopener(f, mode)
450
479
451 def wread(self, filename):
480 def wread(self, filename):
452 if self.encodepats == None:
481 if self.encodepats == None:
453 l = []
482 l = []
454 for pat, cmd in self.ui.configitems("encode"):
483 for pat, cmd in self.ui.configitems("encode"):
455 mf = util.matcher(self.root, "", [pat], [], [])[1]
484 mf = util.matcher(self.root, "", [pat], [], [])[1]
456 l.append((mf, cmd))
485 l.append((mf, cmd))
457 self.encodepats = l
486 self.encodepats = l
458
487
459 data = self.wopener(filename, 'r').read()
488 data = self.wopener(filename, 'r').read()
460
489
461 for mf, cmd in self.encodepats:
490 for mf, cmd in self.encodepats:
462 if mf(filename):
491 if mf(filename):
463 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
492 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
464 data = util.filter(data, cmd)
493 data = util.filter(data, cmd)
465 break
494 break
466
495
467 return data
496 return data
468
497
469 def wwrite(self, filename, data, fd=None):
498 def wwrite(self, filename, data, fd=None):
470 if self.decodepats == None:
499 if self.decodepats == None:
471 l = []
500 l = []
472 for pat, cmd in self.ui.configitems("decode"):
501 for pat, cmd in self.ui.configitems("decode"):
473 mf = util.matcher(self.root, "", [pat], [], [])[1]
502 mf = util.matcher(self.root, "", [pat], [], [])[1]
474 l.append((mf, cmd))
503 l.append((mf, cmd))
475 self.decodepats = l
504 self.decodepats = l
476
505
477 for mf, cmd in self.decodepats:
506 for mf, cmd in self.decodepats:
478 if mf(filename):
507 if mf(filename):
479 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
508 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
480 data = util.filter(data, cmd)
509 data = util.filter(data, cmd)
481 break
510 break
482
511
483 if fd:
512 if fd:
484 return fd.write(data)
513 return fd.write(data)
485 return self.wopener(filename, 'w').write(data)
514 return self.wopener(filename, 'w').write(data)
486
515
487 def transaction(self):
516 def transaction(self):
488 tr = self.transhandle
517 tr = self.transhandle
489 if tr != None and tr.running():
518 if tr != None and tr.running():
490 return tr.nest()
519 return tr.nest()
491
520
492 # save dirstate for rollback
521 # save dirstate for rollback
493 try:
522 try:
494 ds = self.opener("dirstate").read()
523 ds = self.opener("dirstate").read()
495 except IOError:
524 except IOError:
496 ds = ""
525 ds = ""
497 self.opener("journal.dirstate", "w").write(ds)
526 self.opener("journal.dirstate", "w").write(ds)
498
527
499 renames = [(self.sjoin("journal"), self.sjoin("undo")),
528 renames = [(self.sjoin("journal"), self.sjoin("undo")),
500 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
529 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
501 tr = transaction.transaction(self.ui.warn, self.sopener,
530 tr = transaction.transaction(self.ui.warn, self.sopener,
502 self.sjoin("journal"),
531 self.sjoin("journal"),
503 aftertrans(renames))
532 aftertrans(renames))
504 self.transhandle = tr
533 self.transhandle = tr
505 return tr
534 return tr
506
535
507 def recover(self):
536 def recover(self):
508 l = self.lock()
537 l = self.lock()
509 if os.path.exists(self.sjoin("journal")):
538 if os.path.exists(self.sjoin("journal")):
510 self.ui.status(_("rolling back interrupted transaction\n"))
539 self.ui.status(_("rolling back interrupted transaction\n"))
511 transaction.rollback(self.sopener, self.sjoin("journal"))
540 transaction.rollback(self.sopener, self.sjoin("journal"))
512 self.reload()
541 self.reload()
513 return True
542 return True
514 else:
543 else:
515 self.ui.warn(_("no interrupted transaction available\n"))
544 self.ui.warn(_("no interrupted transaction available\n"))
516 return False
545 return False
517
546
518 def rollback(self, wlock=None):
547 def rollback(self, wlock=None):
519 if not wlock:
548 if not wlock:
520 wlock = self.wlock()
549 wlock = self.wlock()
521 l = self.lock()
550 l = self.lock()
522 if os.path.exists(self.sjoin("undo")):
551 if os.path.exists(self.sjoin("undo")):
523 self.ui.status(_("rolling back last transaction\n"))
552 self.ui.status(_("rolling back last transaction\n"))
524 transaction.rollback(self.sopener, self.sjoin("undo"))
553 transaction.rollback(self.sopener, self.sjoin("undo"))
525 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
554 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
526 self.reload()
555 self.reload()
527 self.wreload()
556 self.wreload()
528 else:
557 else:
529 self.ui.warn(_("no rollback information available\n"))
558 self.ui.warn(_("no rollback information available\n"))
530
559
531 def wreload(self):
560 def wreload(self):
532 self.dirstate.read()
561 self.dirstate.read()
533
562
534 def reload(self):
563 def reload(self):
535 self.changelog.load()
564 self.changelog.load()
536 self.manifest.load()
565 self.manifest.load()
537 self.tagscache = None
566 self.tagscache = None
538 self.nodetagscache = None
567 self.nodetagscache = None
539
568
540 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
569 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
541 desc=None):
570 desc=None):
542 try:
571 try:
543 l = lock.lock(lockname, 0, releasefn, desc=desc)
572 l = lock.lock(lockname, 0, releasefn, desc=desc)
544 except lock.LockHeld, inst:
573 except lock.LockHeld, inst:
545 if not wait:
574 if not wait:
546 raise
575 raise
547 self.ui.warn(_("waiting for lock on %s held by %r\n") %
576 self.ui.warn(_("waiting for lock on %s held by %r\n") %
548 (desc, inst.locker))
577 (desc, inst.locker))
549 # default to 600 seconds timeout
578 # default to 600 seconds timeout
550 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
579 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
551 releasefn, desc=desc)
580 releasefn, desc=desc)
552 if acquirefn:
581 if acquirefn:
553 acquirefn()
582 acquirefn()
554 return l
583 return l
555
584
556 def lock(self, wait=1):
585 def lock(self, wait=1):
557 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
586 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
558 desc=_('repository %s') % self.origroot)
587 desc=_('repository %s') % self.origroot)
559
588
560 def wlock(self, wait=1):
589 def wlock(self, wait=1):
561 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
590 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
562 self.wreload,
591 self.wreload,
563 desc=_('working directory of %s') % self.origroot)
592 desc=_('working directory of %s') % self.origroot)
564
593
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn: working-directory name of the file
        manifest1/manifest2: manifests of the two parent changesets
        linkrev: changelog revision this filelog entry will link to
        transaction: transaction to record the write under
        changelist: list fn is appended to when a new revision is added

        returns the filelog node for the committed (or unchanged) text.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # parent filelog nodes, null when the file is absent from a parent
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the dirstate says fn was copied from cp: record copy
            # metadata and normalize the filelog parents accordingly
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                self.ui.debug(_(" %s: copy %s:%s\n") %
                              (fn, cp, meta["copyrev"]))
                fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
606
635
607 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
636 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
608 if p1 is None:
637 if p1 is None:
609 p1, p2 = self.dirstate.parents()
638 p1, p2 = self.dirstate.parents()
610 return self.commit(files=files, text=text, user=user, date=date,
639 return self.commit(files=files, text=text, user=user, date=date,
611 p1=p1, p2=p2, wlock=wlock)
640 p1=p1, p2=p2, wlock=wlock)
612
641
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """create a new changeset and return its node (None if nothing
        was committed).

        files: explicit files to commit; when empty and committing from
            the dirstate, the status (filtered by match) decides
        text: commit message; an editor is run when empty or when
            force_editor is set
        user/date: recorded committer and date (user defaults to
            ui.username())
        force: commit even when nothing changed
        lock/wlock: already-held store/working-dir locks; taken here
            when not supplied
        p1/p2: explicit parents (the rawcommit path); when p1 is None
            the dirstate parents are used
        extra: extra changeset metadata; copied immediately, so the
            shared mutable default dict is never modified
        """

        # NB: the local 'commit' list shadows the method name below
        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # classify the explicit file list by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = util.fromlocal(self.workingctx().branch())
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # a bare branch change is still a commit, so compare names
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a missing file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the "HG:" template shown in the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
748
777
749 def walk(self, node=None, files=[], match=util.always, badmatch=None):
778 def walk(self, node=None, files=[], match=util.always, badmatch=None):
750 '''
779 '''
751 walk recursively through the directory tree or a given
780 walk recursively through the directory tree or a given
752 changeset, finding all files matched by the match
781 changeset, finding all files matched by the match
753 function
782 function
754
783
755 results are yielded in a tuple (src, filename), where src
784 results are yielded in a tuple (src, filename), where src
756 is one of:
785 is one of:
757 'f' the file was found in the directory tree
786 'f' the file was found in the directory tree
758 'm' the file was only in the dirstate and not in the tree
787 'm' the file was only in the dirstate and not in the tree
759 'b' file was not found and matched badmatch
788 'b' file was not found and matched badmatch
760 '''
789 '''
761
790
762 if node:
791 if node:
763 fdict = dict.fromkeys(files)
792 fdict = dict.fromkeys(files)
764 for fn in self.manifest.read(self.changelog.read(node)[0]):
793 for fn in self.manifest.read(self.changelog.read(node)[0]):
765 for ffn in fdict:
794 for ffn in fdict:
766 # match if the file is the exact name or a directory
795 # match if the file is the exact name or a directory
767 if ffn == fn or fn.startswith("%s/" % ffn):
796 if ffn == fn or fn.startswith("%s/" % ffn):
768 del fdict[ffn]
797 del fdict[ffn]
769 break
798 break
770 if match(fn):
799 if match(fn):
771 yield 'm', fn
800 yield 'm', fn
772 for fn in fdict:
801 for fn in fdict:
773 if badmatch and badmatch(fn):
802 if badmatch and badmatch(fn):
774 if match(fn):
803 if match(fn):
775 yield 'b', fn
804 yield 'b', fn
776 else:
805 else:
777 self.ui.warn(_('%s: No such file in rev %s\n') % (
806 self.ui.warn(_('%s: No such file in rev %s\n') % (
778 util.pathto(self.getcwd(), fn), short(node)))
807 util.pathto(self.getcwd(), fn), short(node)))
779 else:
808 else:
780 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
809 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
781 yield src, fn
810 yield src, fn
782
811
783 def status(self, node1=None, node2=None, files=[], match=util.always,
812 def status(self, node1=None, node2=None, files=[], match=util.always,
784 wlock=None, list_ignored=False, list_clean=False):
813 wlock=None, list_ignored=False, list_clean=False):
785 """return status of files between two nodes or node and working directory
814 """return status of files between two nodes or node and working directory
786
815
787 If node1 is None, use the first dirstate parent instead.
816 If node1 is None, use the first dirstate parent instead.
788 If node2 is None, compare node1 with working directory.
817 If node2 is None, compare node1 with working directory.
789 """
818 """
790
819
791 def fcmp(fn, mf):
820 def fcmp(fn, mf):
792 t1 = self.wread(fn)
821 t1 = self.wread(fn)
793 return self.file(fn).cmp(mf.get(fn, nullid), t1)
822 return self.file(fn).cmp(mf.get(fn, nullid), t1)
794
823
795 def mfmatches(node):
824 def mfmatches(node):
796 change = self.changelog.read(node)
825 change = self.changelog.read(node)
797 mf = self.manifest.read(change[0]).copy()
826 mf = self.manifest.read(change[0]).copy()
798 for fn in mf.keys():
827 for fn in mf.keys():
799 if not match(fn):
828 if not match(fn):
800 del mf[fn]
829 del mf[fn]
801 return mf
830 return mf
802
831
803 modified, added, removed, deleted, unknown = [], [], [], [], []
832 modified, added, removed, deleted, unknown = [], [], [], [], []
804 ignored, clean = [], []
833 ignored, clean = [], []
805
834
806 compareworking = False
835 compareworking = False
807 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
836 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
808 compareworking = True
837 compareworking = True
809
838
810 if not compareworking:
839 if not compareworking:
811 # read the manifest from node1 before the manifest from node2,
840 # read the manifest from node1 before the manifest from node2,
812 # so that we'll hit the manifest cache if we're going through
841 # so that we'll hit the manifest cache if we're going through
813 # all the revisions in parent->child order.
842 # all the revisions in parent->child order.
814 mf1 = mfmatches(node1)
843 mf1 = mfmatches(node1)
815
844
816 # are we comparing the working directory?
845 # are we comparing the working directory?
817 if not node2:
846 if not node2:
818 if not wlock:
847 if not wlock:
819 try:
848 try:
820 wlock = self.wlock(wait=0)
849 wlock = self.wlock(wait=0)
821 except lock.LockException:
850 except lock.LockException:
822 wlock = None
851 wlock = None
823 (lookup, modified, added, removed, deleted, unknown,
852 (lookup, modified, added, removed, deleted, unknown,
824 ignored, clean) = self.dirstate.status(files, match,
853 ignored, clean) = self.dirstate.status(files, match,
825 list_ignored, list_clean)
854 list_ignored, list_clean)
826
855
827 # are we comparing working dir against its parent?
856 # are we comparing working dir against its parent?
828 if compareworking:
857 if compareworking:
829 if lookup:
858 if lookup:
830 # do a full compare of any files that might have changed
859 # do a full compare of any files that might have changed
831 mf2 = mfmatches(self.dirstate.parents()[0])
860 mf2 = mfmatches(self.dirstate.parents()[0])
832 for f in lookup:
861 for f in lookup:
833 if fcmp(f, mf2):
862 if fcmp(f, mf2):
834 modified.append(f)
863 modified.append(f)
835 else:
864 else:
836 clean.append(f)
865 clean.append(f)
837 if wlock is not None:
866 if wlock is not None:
838 self.dirstate.update([f], "n")
867 self.dirstate.update([f], "n")
839 else:
868 else:
840 # we are comparing working dir against non-parent
869 # we are comparing working dir against non-parent
841 # generate a pseudo-manifest for the working dir
870 # generate a pseudo-manifest for the working dir
842 # XXX: create it in dirstate.py ?
871 # XXX: create it in dirstate.py ?
843 mf2 = mfmatches(self.dirstate.parents()[0])
872 mf2 = mfmatches(self.dirstate.parents()[0])
844 for f in lookup + modified + added:
873 for f in lookup + modified + added:
845 mf2[f] = ""
874 mf2[f] = ""
846 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
875 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
847 for f in removed:
876 for f in removed:
848 if f in mf2:
877 if f in mf2:
849 del mf2[f]
878 del mf2[f]
850 else:
879 else:
851 # we are comparing two revisions
880 # we are comparing two revisions
852 mf2 = mfmatches(node2)
881 mf2 = mfmatches(node2)
853
882
854 if not compareworking:
883 if not compareworking:
855 # flush lists from dirstate before comparing manifests
884 # flush lists from dirstate before comparing manifests
856 modified, added, clean = [], [], []
885 modified, added, clean = [], [], []
857
886
858 # make sure to sort the files so we talk to the disk in a
887 # make sure to sort the files so we talk to the disk in a
859 # reasonable order
888 # reasonable order
860 mf2keys = mf2.keys()
889 mf2keys = mf2.keys()
861 mf2keys.sort()
890 mf2keys.sort()
862 for fn in mf2keys:
891 for fn in mf2keys:
863 if mf1.has_key(fn):
892 if mf1.has_key(fn):
864 if mf1.flags(fn) != mf2.flags(fn) or \
893 if mf1.flags(fn) != mf2.flags(fn) or \
865 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
894 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
866 modified.append(fn)
895 modified.append(fn)
867 elif list_clean:
896 elif list_clean:
868 clean.append(fn)
897 clean.append(fn)
869 del mf1[fn]
898 del mf1[fn]
870 else:
899 else:
871 added.append(fn)
900 added.append(fn)
872
901
873 removed = mf1.keys()
902 removed = mf1.keys()
874
903
875 # sort and return results:
904 # sort and return results:
876 for l in modified, added, removed, deleted, unknown, ignored, clean:
905 for l in modified, added, removed, deleted, unknown, ignored, clean:
877 l.sort()
906 l.sort()
878 return (modified, added, removed, deleted, unknown, ignored, clean)
907 return (modified, added, removed, deleted, unknown, ignored, clean)
879
908
880 def add(self, list, wlock=None):
909 def add(self, list, wlock=None):
881 if not wlock:
910 if not wlock:
882 wlock = self.wlock()
911 wlock = self.wlock()
883 for f in list:
912 for f in list:
884 p = self.wjoin(f)
913 p = self.wjoin(f)
885 if not os.path.exists(p):
914 if not os.path.exists(p):
886 self.ui.warn(_("%s does not exist!\n") % f)
915 self.ui.warn(_("%s does not exist!\n") % f)
887 elif not os.path.isfile(p):
916 elif not os.path.isfile(p):
888 self.ui.warn(_("%s not added: only files supported currently\n")
917 self.ui.warn(_("%s not added: only files supported currently\n")
889 % f)
918 % f)
890 elif self.dirstate.state(f) in 'an':
919 elif self.dirstate.state(f) in 'an':
891 self.ui.warn(_("%s already tracked!\n") % f)
920 self.ui.warn(_("%s already tracked!\n") % f)
892 else:
921 else:
893 self.dirstate.update([f], "a")
922 self.dirstate.update([f], "a")
894
923
895 def forget(self, list, wlock=None):
924 def forget(self, list, wlock=None):
896 if not wlock:
925 if not wlock:
897 wlock = self.wlock()
926 wlock = self.wlock()
898 for f in list:
927 for f in list:
899 if self.dirstate.state(f) not in 'ai':
928 if self.dirstate.state(f) not in 'ai':
900 self.ui.warn(_("%s not added!\n") % f)
929 self.ui.warn(_("%s not added!\n") % f)
901 else:
930 else:
902 self.dirstate.forget([f])
931 self.dirstate.forget([f])
903
932
904 def remove(self, list, unlink=False, wlock=None):
933 def remove(self, list, unlink=False, wlock=None):
905 if unlink:
934 if unlink:
906 for f in list:
935 for f in list:
907 try:
936 try:
908 util.unlink(self.wjoin(f))
937 util.unlink(self.wjoin(f))
909 except OSError, inst:
938 except OSError, inst:
910 if inst.errno != errno.ENOENT:
939 if inst.errno != errno.ENOENT:
911 raise
940 raise
912 if not wlock:
941 if not wlock:
913 wlock = self.wlock()
942 wlock = self.wlock()
914 for f in list:
943 for f in list:
915 p = self.wjoin(f)
944 p = self.wjoin(f)
916 if os.path.exists(p):
945 if os.path.exists(p):
917 self.ui.warn(_("%s still exists!\n") % f)
946 self.ui.warn(_("%s still exists!\n") % f)
918 elif self.dirstate.state(f) == 'a':
947 elif self.dirstate.state(f) == 'a':
919 self.dirstate.forget([f])
948 self.dirstate.forget([f])
920 elif f not in self.dirstate:
949 elif f not in self.dirstate:
921 self.ui.warn(_("%s not tracked!\n") % f)
950 self.ui.warn(_("%s not tracked!\n") % f)
922 else:
951 else:
923 self.dirstate.update([f], "r")
952 self.dirstate.update([f], "r")
924
953
925 def undelete(self, list, wlock=None):
954 def undelete(self, list, wlock=None):
926 p = self.dirstate.parents()[0]
955 p = self.dirstate.parents()[0]
927 mn = self.changelog.read(p)[0]
956 mn = self.changelog.read(p)[0]
928 m = self.manifest.read(mn)
957 m = self.manifest.read(mn)
929 if not wlock:
958 if not wlock:
930 wlock = self.wlock()
959 wlock = self.wlock()
931 for f in list:
960 for f in list:
932 if self.dirstate.state(f) not in "r":
961 if self.dirstate.state(f) not in "r":
933 self.ui.warn("%s not removed!\n" % f)
962 self.ui.warn("%s not removed!\n" % f)
934 else:
963 else:
935 t = self.file(f).read(m[f])
964 t = self.file(f).read(m[f])
936 self.wwrite(f, t)
965 self.wwrite(f, t)
937 util.set_exec(self.wjoin(f), m.execf(f))
966 util.set_exec(self.wjoin(f), m.execf(f))
938 self.dirstate.update([f], "n")
967 self.dirstate.update([f], "n")
939
968
940 def copy(self, source, dest, wlock=None):
969 def copy(self, source, dest, wlock=None):
941 p = self.wjoin(dest)
970 p = self.wjoin(dest)
942 if not os.path.exists(p):
971 if not os.path.exists(p):
943 self.ui.warn(_("%s does not exist!\n") % dest)
972 self.ui.warn(_("%s does not exist!\n") % dest)
944 elif not os.path.isfile(p):
973 elif not os.path.isfile(p):
945 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
974 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
946 else:
975 else:
947 if not wlock:
976 if not wlock:
948 wlock = self.wlock()
977 wlock = self.wlock()
949 if self.dirstate.state(dest) == '?':
978 if self.dirstate.state(dest) == '?':
950 self.dirstate.update([dest], "a")
979 self.dirstate.update([dest], "a")
951 self.dirstate.copy(source, dest)
980 self.dirstate.copy(source, dest)
952
981
953 def heads(self, start=None):
982 def heads(self, start=None):
954 heads = self.changelog.heads(start)
983 heads = self.changelog.heads(start)
955 # sort the output in rev descending order
984 # sort the output in rev descending order
956 heads = [(-self.changelog.rev(h), h) for h in heads]
985 heads = [(-self.changelog.rev(h), h) for h in heads]
957 heads.sort()
986 heads.sort()
958 return [n for (r, n) in heads]
987 return [n for (r, n) in heads]
959
988
960 # branchlookup returns a dict giving a list of branches for
989 # branchlookup returns a dict giving a list of branches for
961 # each head. A branch is defined as the tag of a node or
990 # each head. A branch is defined as the tag of a node or
962 # the branch of the node's parents. If a node has multiple
991 # the branch of the node's parents. If a node has multiple
963 # branch tags, tags are eliminated if they are visible from other
992 # branch tags, tags are eliminated if they are visible from other
964 # branch tags.
993 # branch tags.
965 #
994 #
966 # So, for this graph: a->b->c->d->e
995 # So, for this graph: a->b->c->d->e
967 # \ /
996 # \ /
968 # aa -----/
997 # aa -----/
969 # a has tag 2.6.12
998 # a has tag 2.6.12
970 # d has tag 2.6.13
999 # d has tag 2.6.13
971 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1000 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
972 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1001 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
973 # from the list.
1002 # from the list.
974 #
1003 #
975 # It is possible that more than one head will have the same branch tag.
1004 # It is possible that more than one head will have the same branch tag.
976 # callers need to check the result for multiple heads under the same
1005 # callers need to check the result for multiple heads under the same
977 # branch tag if that is a problem for them (ie checkout of a specific
1006 # branch tag if that is a problem for them (ie checkout of a specific
978 # branch).
1007 # branch).
979 #
1008 #
980 # passing in a specific branch will limit the depth of the search
1009 # passing in a specific branch will limit the depth of the search
981 # through the parents. It won't limit the branches returned in the
1010 # through the parents. It won't limit the branches returned in the
982 # result though.
1011 # result though.
983 def branchlookup(self, heads=None, branch=None):
1012 def branchlookup(self, heads=None, branch=None):
984 if not heads:
1013 if not heads:
985 heads = self.heads()
1014 heads = self.heads()
986 headt = [ h for h in heads ]
1015 headt = [ h for h in heads ]
987 chlog = self.changelog
1016 chlog = self.changelog
988 branches = {}
1017 branches = {}
989 merges = []
1018 merges = []
990 seenmerge = {}
1019 seenmerge = {}
991
1020
992 # traverse the tree once for each head, recording in the branches
1021 # traverse the tree once for each head, recording in the branches
993 # dict which tags are visible from this head. The branches
1022 # dict which tags are visible from this head. The branches
994 # dict also records which tags are visible from each tag
1023 # dict also records which tags are visible from each tag
995 # while we traverse.
1024 # while we traverse.
996 while headt or merges:
1025 while headt or merges:
997 if merges:
1026 if merges:
998 n, found = merges.pop()
1027 n, found = merges.pop()
999 visit = [n]
1028 visit = [n]
1000 else:
1029 else:
1001 h = headt.pop()
1030 h = headt.pop()
1002 visit = [h]
1031 visit = [h]
1003 found = [h]
1032 found = [h]
1004 seen = {}
1033 seen = {}
1005 while visit:
1034 while visit:
1006 n = visit.pop()
1035 n = visit.pop()
1007 if n in seen:
1036 if n in seen:
1008 continue
1037 continue
1009 pp = chlog.parents(n)
1038 pp = chlog.parents(n)
1010 tags = self.nodetags(n)
1039 tags = self.nodetags(n)
1011 if tags:
1040 if tags:
1012 for x in tags:
1041 for x in tags:
1013 if x == 'tip':
1042 if x == 'tip':
1014 continue
1043 continue
1015 for f in found:
1044 for f in found:
1016 branches.setdefault(f, {})[n] = 1
1045 branches.setdefault(f, {})[n] = 1
1017 branches.setdefault(n, {})[n] = 1
1046 branches.setdefault(n, {})[n] = 1
1018 break
1047 break
1019 if n not in found:
1048 if n not in found:
1020 found.append(n)
1049 found.append(n)
1021 if branch in tags:
1050 if branch in tags:
1022 continue
1051 continue
1023 seen[n] = 1
1052 seen[n] = 1
1024 if pp[1] != nullid and n not in seenmerge:
1053 if pp[1] != nullid and n not in seenmerge:
1025 merges.append((pp[1], [x for x in found]))
1054 merges.append((pp[1], [x for x in found]))
1026 seenmerge[n] = 1
1055 seenmerge[n] = 1
1027 if pp[0] != nullid:
1056 if pp[0] != nullid:
1028 visit.append(pp[0])
1057 visit.append(pp[0])
1029 # traverse the branches dict, eliminating branch tags from each
1058 # traverse the branches dict, eliminating branch tags from each
1030 # head that are visible from another branch tag for that head.
1059 # head that are visible from another branch tag for that head.
1031 out = {}
1060 out = {}
1032 viscache = {}
1061 viscache = {}
1033 for h in heads:
1062 for h in heads:
1034 def visible(node):
1063 def visible(node):
1035 if node in viscache:
1064 if node in viscache:
1036 return viscache[node]
1065 return viscache[node]
1037 ret = {}
1066 ret = {}
1038 visit = [node]
1067 visit = [node]
1039 while visit:
1068 while visit:
1040 x = visit.pop()
1069 x = visit.pop()
1041 if x in viscache:
1070 if x in viscache:
1042 ret.update(viscache[x])
1071 ret.update(viscache[x])
1043 elif x not in ret:
1072 elif x not in ret:
1044 ret[x] = 1
1073 ret[x] = 1
1045 if x in branches:
1074 if x in branches:
1046 visit[len(visit):] = branches[x].keys()
1075 visit[len(visit):] = branches[x].keys()
1047 viscache[node] = ret
1076 viscache[node] = ret
1048 return ret
1077 return ret
1049 if h not in branches:
1078 if h not in branches:
1050 continue
1079 continue
1051 # O(n^2), but somewhat limited. This only searches the
1080 # O(n^2), but somewhat limited. This only searches the
1052 # tags visible from a specific head, not all the tags in the
1081 # tags visible from a specific head, not all the tags in the
1053 # whole repo.
1082 # whole repo.
1054 for b in branches[h]:
1083 for b in branches[h]:
1055 vis = False
1084 vis = False
1056 for bb in branches[h].keys():
1085 for bb in branches[h].keys():
1057 if b != bb:
1086 if b != bb:
1058 if b in visible(bb):
1087 if b in visible(bb):
1059 vis = True
1088 vis = True
1060 break
1089 break
1061 if not vis:
1090 if not vis:
1062 l = out.setdefault(h, [])
1091 l = out.setdefault(h, [])
1063 l[len(l):] = self.nodetags(b)
1092 l[len(l):] = self.nodetags(b)
1064 return out
1093 return out
1065
1094
1066 def branches(self, nodes):
1095 def branches(self, nodes):
1067 if not nodes:
1096 if not nodes:
1068 nodes = [self.changelog.tip()]
1097 nodes = [self.changelog.tip()]
1069 b = []
1098 b = []
1070 for n in nodes:
1099 for n in nodes:
1071 t = n
1100 t = n
1072 while 1:
1101 while 1:
1073 p = self.changelog.parents(n)
1102 p = self.changelog.parents(n)
1074 if p[1] != nullid or p[0] == nullid:
1103 if p[1] != nullid or p[0] == nullid:
1075 b.append((t, n, p[0], p[1]))
1104 b.append((t, n, p[0], p[1]))
1076 break
1105 break
1077 n = p[0]
1106 n = p[0]
1078 return b
1107 return b
1079
1108
1080 def between(self, pairs):
1109 def between(self, pairs):
1081 r = []
1110 r = []
1082
1111
1083 for top, bottom in pairs:
1112 for top, bottom in pairs:
1084 n, l, i = top, [], 0
1113 n, l, i = top, [], 0
1085 f = 1
1114 f = 1
1086
1115
1087 while n != bottom:
1116 while n != bottom:
1088 p = self.changelog.parents(n)[0]
1117 p = self.changelog.parents(n)[0]
1089 if i == f:
1118 if i == f:
1090 l.append(n)
1119 l.append(n)
1091 f = f * 2
1120 f = f * 2
1092 n = p
1121 n = p
1093 i += 1
1122 i += 1
1094
1123
1095 r.append(l)
1124 r.append(l)
1096
1125
1097 return r
1126 return r
1098
1127
1099 def findincoming(self, remote, base=None, heads=None, force=False):
1128 def findincoming(self, remote, base=None, heads=None, force=False):
1100 """Return list of roots of the subsets of missing nodes from remote
1129 """Return list of roots of the subsets of missing nodes from remote
1101
1130
1102 If base dict is specified, assume that these nodes and their parents
1131 If base dict is specified, assume that these nodes and their parents
1103 exist on the remote side and that no child of a node of base exists
1132 exist on the remote side and that no child of a node of base exists
1104 in both remote and self.
1133 in both remote and self.
1105 Furthermore base will be updated to include the nodes that exists
1134 Furthermore base will be updated to include the nodes that exists
1106 in self and remote but no children exists in self and remote.
1135 in self and remote but no children exists in self and remote.
1107 If a list of heads is specified, return only nodes which are heads
1136 If a list of heads is specified, return only nodes which are heads
1108 or ancestors of these heads.
1137 or ancestors of these heads.
1109
1138
1110 All the ancestors of base are in self and in remote.
1139 All the ancestors of base are in self and in remote.
1111 All the descendants of the list returned are missing in self.
1140 All the descendants of the list returned are missing in self.
1112 (and so we know that the rest of the nodes are missing in remote, see
1141 (and so we know that the rest of the nodes are missing in remote, see
1113 outgoing)
1142 outgoing)
1114 """
1143 """
1115 m = self.changelog.nodemap
1144 m = self.changelog.nodemap
1116 search = []
1145 search = []
1117 fetch = {}
1146 fetch = {}
1118 seen = {}
1147 seen = {}
1119 seenbranch = {}
1148 seenbranch = {}
1120 if base == None:
1149 if base == None:
1121 base = {}
1150 base = {}
1122
1151
1123 if not heads:
1152 if not heads:
1124 heads = remote.heads()
1153 heads = remote.heads()
1125
1154
1126 if self.changelog.tip() == nullid:
1155 if self.changelog.tip() == nullid:
1127 base[nullid] = 1
1156 base[nullid] = 1
1128 if heads != [nullid]:
1157 if heads != [nullid]:
1129 return [nullid]
1158 return [nullid]
1130 return []
1159 return []
1131
1160
1132 # assume we're closer to the tip than the root
1161 # assume we're closer to the tip than the root
1133 # and start by examining the heads
1162 # and start by examining the heads
1134 self.ui.status(_("searching for changes\n"))
1163 self.ui.status(_("searching for changes\n"))
1135
1164
1136 unknown = []
1165 unknown = []
1137 for h in heads:
1166 for h in heads:
1138 if h not in m:
1167 if h not in m:
1139 unknown.append(h)
1168 unknown.append(h)
1140 else:
1169 else:
1141 base[h] = 1
1170 base[h] = 1
1142
1171
1143 if not unknown:
1172 if not unknown:
1144 return []
1173 return []
1145
1174
1146 req = dict.fromkeys(unknown)
1175 req = dict.fromkeys(unknown)
1147 reqcnt = 0
1176 reqcnt = 0
1148
1177
1149 # search through remote branches
1178 # search through remote branches
1150 # a 'branch' here is a linear segment of history, with four parts:
1179 # a 'branch' here is a linear segment of history, with four parts:
1151 # head, root, first parent, second parent
1180 # head, root, first parent, second parent
1152 # (a branch always has two parents (or none) by definition)
1181 # (a branch always has two parents (or none) by definition)
1153 unknown = remote.branches(unknown)
1182 unknown = remote.branches(unknown)
1154 while unknown:
1183 while unknown:
1155 r = []
1184 r = []
1156 while unknown:
1185 while unknown:
1157 n = unknown.pop(0)
1186 n = unknown.pop(0)
1158 if n[0] in seen:
1187 if n[0] in seen:
1159 continue
1188 continue
1160
1189
1161 self.ui.debug(_("examining %s:%s\n")
1190 self.ui.debug(_("examining %s:%s\n")
1162 % (short(n[0]), short(n[1])))
1191 % (short(n[0]), short(n[1])))
1163 if n[0] == nullid: # found the end of the branch
1192 if n[0] == nullid: # found the end of the branch
1164 pass
1193 pass
1165 elif n in seenbranch:
1194 elif n in seenbranch:
1166 self.ui.debug(_("branch already found\n"))
1195 self.ui.debug(_("branch already found\n"))
1167 continue
1196 continue
1168 elif n[1] and n[1] in m: # do we know the base?
1197 elif n[1] and n[1] in m: # do we know the base?
1169 self.ui.debug(_("found incomplete branch %s:%s\n")
1198 self.ui.debug(_("found incomplete branch %s:%s\n")
1170 % (short(n[0]), short(n[1])))
1199 % (short(n[0]), short(n[1])))
1171 search.append(n) # schedule branch range for scanning
1200 search.append(n) # schedule branch range for scanning
1172 seenbranch[n] = 1
1201 seenbranch[n] = 1
1173 else:
1202 else:
1174 if n[1] not in seen and n[1] not in fetch:
1203 if n[1] not in seen and n[1] not in fetch:
1175 if n[2] in m and n[3] in m:
1204 if n[2] in m and n[3] in m:
1176 self.ui.debug(_("found new changeset %s\n") %
1205 self.ui.debug(_("found new changeset %s\n") %
1177 short(n[1]))
1206 short(n[1]))
1178 fetch[n[1]] = 1 # earliest unknown
1207 fetch[n[1]] = 1 # earliest unknown
1179 for p in n[2:4]:
1208 for p in n[2:4]:
1180 if p in m:
1209 if p in m:
1181 base[p] = 1 # latest known
1210 base[p] = 1 # latest known
1182
1211
1183 for p in n[2:4]:
1212 for p in n[2:4]:
1184 if p not in req and p not in m:
1213 if p not in req and p not in m:
1185 r.append(p)
1214 r.append(p)
1186 req[p] = 1
1215 req[p] = 1
1187 seen[n[0]] = 1
1216 seen[n[0]] = 1
1188
1217
1189 if r:
1218 if r:
1190 reqcnt += 1
1219 reqcnt += 1
1191 self.ui.debug(_("request %d: %s\n") %
1220 self.ui.debug(_("request %d: %s\n") %
1192 (reqcnt, " ".join(map(short, r))))
1221 (reqcnt, " ".join(map(short, r))))
1193 for p in xrange(0, len(r), 10):
1222 for p in xrange(0, len(r), 10):
1194 for b in remote.branches(r[p:p+10]):
1223 for b in remote.branches(r[p:p+10]):
1195 self.ui.debug(_("received %s:%s\n") %
1224 self.ui.debug(_("received %s:%s\n") %
1196 (short(b[0]), short(b[1])))
1225 (short(b[0]), short(b[1])))
1197 unknown.append(b)
1226 unknown.append(b)
1198
1227
1199 # do binary search on the branches we found
1228 # do binary search on the branches we found
1200 while search:
1229 while search:
1201 n = search.pop(0)
1230 n = search.pop(0)
1202 reqcnt += 1
1231 reqcnt += 1
1203 l = remote.between([(n[0], n[1])])[0]
1232 l = remote.between([(n[0], n[1])])[0]
1204 l.append(n[1])
1233 l.append(n[1])
1205 p = n[0]
1234 p = n[0]
1206 f = 1
1235 f = 1
1207 for i in l:
1236 for i in l:
1208 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1237 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1209 if i in m:
1238 if i in m:
1210 if f <= 2:
1239 if f <= 2:
1211 self.ui.debug(_("found new branch changeset %s\n") %
1240 self.ui.debug(_("found new branch changeset %s\n") %
1212 short(p))
1241 short(p))
1213 fetch[p] = 1
1242 fetch[p] = 1
1214 base[i] = 1
1243 base[i] = 1
1215 else:
1244 else:
1216 self.ui.debug(_("narrowed branch search to %s:%s\n")
1245 self.ui.debug(_("narrowed branch search to %s:%s\n")
1217 % (short(p), short(i)))
1246 % (short(p), short(i)))
1218 search.append((p, i))
1247 search.append((p, i))
1219 break
1248 break
1220 p, f = i, f * 2
1249 p, f = i, f * 2
1221
1250
1222 # sanity check our fetch list
1251 # sanity check our fetch list
1223 for f in fetch.keys():
1252 for f in fetch.keys():
1224 if f in m:
1253 if f in m:
1225 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1254 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1226
1255
1227 if base.keys() == [nullid]:
1256 if base.keys() == [nullid]:
1228 if force:
1257 if force:
1229 self.ui.warn(_("warning: repository is unrelated\n"))
1258 self.ui.warn(_("warning: repository is unrelated\n"))
1230 else:
1259 else:
1231 raise util.Abort(_("repository is unrelated"))
1260 raise util.Abort(_("repository is unrelated"))
1232
1261
1233 self.ui.debug(_("found new changesets starting at ") +
1262 self.ui.debug(_("found new changesets starting at ") +
1234 " ".join([short(f) for f in fetch]) + "\n")
1263 " ".join([short(f) for f in fetch]) + "\n")
1235
1264
1236 self.ui.debug(_("%d total queries\n") % reqcnt)
1265 self.ui.debug(_("%d total queries\n") % reqcnt)
1237
1266
1238 return fetch.keys()
1267 return fetch.keys()
1239
1268
1240 def findoutgoing(self, remote, base=None, heads=None, force=False):
1269 def findoutgoing(self, remote, base=None, heads=None, force=False):
1241 """Return list of nodes that are roots of subsets not in remote
1270 """Return list of nodes that are roots of subsets not in remote
1242
1271
1243 If base dict is specified, assume that these nodes and their parents
1272 If base dict is specified, assume that these nodes and their parents
1244 exist on the remote side.
1273 exist on the remote side.
1245 If a list of heads is specified, return only nodes which are heads
1274 If a list of heads is specified, return only nodes which are heads
1246 or ancestors of these heads, and return a second element which
1275 or ancestors of these heads, and return a second element which
1247 contains all remote heads which get new children.
1276 contains all remote heads which get new children.
1248 """
1277 """
1249 if base == None:
1278 if base == None:
1250 base = {}
1279 base = {}
1251 self.findincoming(remote, base, heads, force=force)
1280 self.findincoming(remote, base, heads, force=force)
1252
1281
1253 self.ui.debug(_("common changesets up to ")
1282 self.ui.debug(_("common changesets up to ")
1254 + " ".join(map(short, base.keys())) + "\n")
1283 + " ".join(map(short, base.keys())) + "\n")
1255
1284
1256 remain = dict.fromkeys(self.changelog.nodemap)
1285 remain = dict.fromkeys(self.changelog.nodemap)
1257
1286
1258 # prune everything remote has from the tree
1287 # prune everything remote has from the tree
1259 del remain[nullid]
1288 del remain[nullid]
1260 remove = base.keys()
1289 remove = base.keys()
1261 while remove:
1290 while remove:
1262 n = remove.pop(0)
1291 n = remove.pop(0)
1263 if n in remain:
1292 if n in remain:
1264 del remain[n]
1293 del remain[n]
1265 for p in self.changelog.parents(n):
1294 for p in self.changelog.parents(n):
1266 remove.append(p)
1295 remove.append(p)
1267
1296
1268 # find every node whose parents have been pruned
1297 # find every node whose parents have been pruned
1269 subset = []
1298 subset = []
1270 # find every remote head that will get new children
1299 # find every remote head that will get new children
1271 updated_heads = {}
1300 updated_heads = {}
1272 for n in remain:
1301 for n in remain:
1273 p1, p2 = self.changelog.parents(n)
1302 p1, p2 = self.changelog.parents(n)
1274 if p1 not in remain and p2 not in remain:
1303 if p1 not in remain and p2 not in remain:
1275 subset.append(n)
1304 subset.append(n)
1276 if heads:
1305 if heads:
1277 if p1 in heads:
1306 if p1 in heads:
1278 updated_heads[p1] = True
1307 updated_heads[p1] = True
1279 if p2 in heads:
1308 if p2 in heads:
1280 updated_heads[p2] = True
1309 updated_heads[p2] = True
1281
1310
1282 # this is the set of all roots we have to push
1311 # this is the set of all roots we have to push
1283 if heads:
1312 if heads:
1284 return subset, updated_heads.keys()
1313 return subset, updated_heads.keys()
1285 else:
1314 else:
1286 return subset
1315 return subset
1287
1316
1288 def pull(self, remote, heads=None, force=False, lock=None):
1317 def pull(self, remote, heads=None, force=False, lock=None):
1289 mylock = False
1318 mylock = False
1290 if not lock:
1319 if not lock:
1291 lock = self.lock()
1320 lock = self.lock()
1292 mylock = True
1321 mylock = True
1293
1322
1294 try:
1323 try:
1295 fetch = self.findincoming(remote, force=force)
1324 fetch = self.findincoming(remote, force=force)
1296 if fetch == [nullid]:
1325 if fetch == [nullid]:
1297 self.ui.status(_("requesting all changes\n"))
1326 self.ui.status(_("requesting all changes\n"))
1298
1327
1299 if not fetch:
1328 if not fetch:
1300 self.ui.status(_("no changes found\n"))
1329 self.ui.status(_("no changes found\n"))
1301 return 0
1330 return 0
1302
1331
1303 if heads is None:
1332 if heads is None:
1304 cg = remote.changegroup(fetch, 'pull')
1333 cg = remote.changegroup(fetch, 'pull')
1305 else:
1334 else:
1306 if 'changegroupsubset' not in remote.capabilities:
1335 if 'changegroupsubset' not in remote.capabilities:
1307 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1336 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1308 cg = remote.changegroupsubset(fetch, heads, 'pull')
1337 cg = remote.changegroupsubset(fetch, heads, 'pull')
1309 return self.addchangegroup(cg, 'pull', remote.url())
1338 return self.addchangegroup(cg, 'pull', remote.url())
1310 finally:
1339 finally:
1311 if mylock:
1340 if mylock:
1312 lock.release()
1341 lock.release()
1313
1342
1314 def push(self, remote, force=False, revs=None):
1343 def push(self, remote, force=False, revs=None):
1315 # there are two ways to push to remote repo:
1344 # there are two ways to push to remote repo:
1316 #
1345 #
1317 # addchangegroup assumes local user can lock remote
1346 # addchangegroup assumes local user can lock remote
1318 # repo (local filesystem, old ssh servers).
1347 # repo (local filesystem, old ssh servers).
1319 #
1348 #
1320 # unbundle assumes local user cannot lock remote repo (new ssh
1349 # unbundle assumes local user cannot lock remote repo (new ssh
1321 # servers, http servers).
1350 # servers, http servers).
1322
1351
1323 if remote.capable('unbundle'):
1352 if remote.capable('unbundle'):
1324 return self.push_unbundle(remote, force, revs)
1353 return self.push_unbundle(remote, force, revs)
1325 return self.push_addchangegroup(remote, force, revs)
1354 return self.push_addchangegroup(remote, force, revs)
1326
1355
    def prepush(self, remote, force, revs):
        """Compute the changegroup to send to remote for a push.

        Returns a pair: (changegroup, remote_heads) when the push may
        proceed, or (None, error_code) when there is nothing to push or
        the push would create new remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        # findincoming fills 'base' with common nodes; 'inc' is true when
        # remote has changesets we have not pulled yet.
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the requested revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repository is empty; no extra heads possible
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything, and we locally have more heads
                warn = 1
            else:
                # simulate the post-push remote head set
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # remains a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally; it stays a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            # forced push while remote has unpulled changes: warn only
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1382
1411
1383 def push_addchangegroup(self, remote, force, revs):
1412 def push_addchangegroup(self, remote, force, revs):
1384 lock = remote.lock()
1413 lock = remote.lock()
1385
1414
1386 ret = self.prepush(remote, force, revs)
1415 ret = self.prepush(remote, force, revs)
1387 if ret[0] is not None:
1416 if ret[0] is not None:
1388 cg, remote_heads = ret
1417 cg, remote_heads = ret
1389 return remote.addchangegroup(cg, 'push', self.url())
1418 return remote.addchangegroup(cg, 'push', self.url())
1390 return ret[1]
1419 return ret[1]
1391
1420
1392 def push_unbundle(self, remote, force, revs):
1421 def push_unbundle(self, remote, force, revs):
1393 # local repo finds heads on server, finds out what revs it
1422 # local repo finds heads on server, finds out what revs it
1394 # must push. once revs transferred, if server finds it has
1423 # must push. once revs transferred, if server finds it has
1395 # different heads (someone else won commit/push race), server
1424 # different heads (someone else won commit/push race), server
1396 # aborts.
1425 # aborts.
1397
1426
1398 ret = self.prepush(remote, force, revs)
1427 ret = self.prepush(remote, force, revs)
1399 if ret[0] is not None:
1428 if ret[0] is not None:
1400 cg, remote_heads = ret
1429 cg, remote_heads = ret
1401 if force: remote_heads = ['force']
1430 if force: remote_heads = ['force']
1402 return remote.unbundle(cg, remote_heads, 'push')
1431 return remote.unbundle(cg, remote_heads, 'push')
1403 return ret[1]
1432 return ret[1]
1404
1433
1405 def changegroupinfo(self, nodes):
1434 def changegroupinfo(self, nodes):
1406 self.ui.note(_("%d changesets found\n") % len(nodes))
1435 self.ui.note(_("%d changesets found\n") % len(nodes))
1407 if self.ui.debugflag:
1436 if self.ui.debugflag:
1408 self.ui.debug(_("List of changesets:\n"))
1437 self.ui.debug(_("List of changesets:\n"))
1409 for node in nodes:
1438 for node in nodes:
1410 self.ui.debug("%s\n" % hex(node))
1439 self.ui.debug("%s\n" % hex(node))
1411
1440
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1683
1712
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset descended from basenodes is outgoing
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of outgoing changelog revision numbers for fast lookup
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # changenodes look themselves up, so their lookup is identity
        def identity(x):
            return x

        # yield the nodes of 'revlog' whose linked changeset is outgoing
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # closure factory: record which files each streamed changeset touched
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # closure factory: map a manifest/file node back to the changeset
        # node it was introduced by
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        # Emit the changegroup stream: changelog chunks, then manifest
        # chunks, then per-file chunks, then the terminating close chunk.
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so an empty iterator can be detected cheaply
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1750
1779
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source -- chunk stream to read the changegroup from
        srctype/url -- passed through to the hooks for provenance

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # linkrev callback for changelog chunks: the next changeset
            # links to itself (the current count)
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # linkrev callback for manifest/file chunks: map a changelog
            # node to its revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog revision numbers before/after the add,
            # used below to fire per-changeset hooks
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the appendfile changelog to its real file
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still abort the whole transaction
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1857
1886
1858
1887
1859 def stream_in(self, remote):
1888 def stream_in(self, remote):
1860 fp = remote.stream_out()
1889 fp = remote.stream_out()
1861 l = fp.readline()
1890 l = fp.readline()
1862 try:
1891 try:
1863 resp = int(l)
1892 resp = int(l)
1864 except ValueError:
1893 except ValueError:
1865 raise util.UnexpectedOutput(
1894 raise util.UnexpectedOutput(
1866 _('Unexpected response from remote server:'), l)
1895 _('Unexpected response from remote server:'), l)
1867 if resp == 1:
1896 if resp == 1:
1868 raise util.Abort(_('operation forbidden by server'))
1897 raise util.Abort(_('operation forbidden by server'))
1869 elif resp == 2:
1898 elif resp == 2:
1870 raise util.Abort(_('locking the remote repository failed'))
1899 raise util.Abort(_('locking the remote repository failed'))
1871 elif resp != 0:
1900 elif resp != 0:
1872 raise util.Abort(_('the server sent an unknown error code'))
1901 raise util.Abort(_('the server sent an unknown error code'))
1873 self.ui.status(_('streaming all changes\n'))
1902 self.ui.status(_('streaming all changes\n'))
1874 l = fp.readline()
1903 l = fp.readline()
1875 try:
1904 try:
1876 total_files, total_bytes = map(int, l.split(' ', 1))
1905 total_files, total_bytes = map(int, l.split(' ', 1))
1877 except ValueError, TypeError:
1906 except ValueError, TypeError:
1878 raise util.UnexpectedOutput(
1907 raise util.UnexpectedOutput(
1879 _('Unexpected response from remote server:'), l)
1908 _('Unexpected response from remote server:'), l)
1880 self.ui.status(_('%d files to transfer, %s of data\n') %
1909 self.ui.status(_('%d files to transfer, %s of data\n') %
1881 (total_files, util.bytecount(total_bytes)))
1910 (total_files, util.bytecount(total_bytes)))
1882 start = time.time()
1911 start = time.time()
1883 for i in xrange(total_files):
1912 for i in xrange(total_files):
1884 # XXX doesn't support '\n' or '\r' in filenames
1913 # XXX doesn't support '\n' or '\r' in filenames
1885 l = fp.readline()
1914 l = fp.readline()
1886 try:
1915 try:
1887 name, size = l.split('\0', 1)
1916 name, size = l.split('\0', 1)
1888 size = int(size)
1917 size = int(size)
1889 except ValueError, TypeError:
1918 except ValueError, TypeError:
1890 raise util.UnexpectedOutput(
1919 raise util.UnexpectedOutput(
1891 _('Unexpected response from remote server:'), l)
1920 _('Unexpected response from remote server:'), l)
1892 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1921 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1893 ofp = self.sopener(name, 'w')
1922 ofp = self.sopener(name, 'w')
1894 for chunk in util.filechunkiter(fp, limit=size):
1923 for chunk in util.filechunkiter(fp, limit=size):
1895 ofp.write(chunk)
1924 ofp.write(chunk)
1896 ofp.close()
1925 ofp.close()
1897 elapsed = time.time() - start
1926 elapsed = time.time() - start
1898 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1927 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1899 (util.bytecount(total_bytes), elapsed,
1928 (util.bytecount(total_bytes), elapsed,
1900 util.bytecount(total_bytes / elapsed)))
1929 util.bytecount(total_bytes / elapsed)))
1901 self.reload()
1930 self.reload()
1902 return len(self.heads()) + 1
1931 return len(self.heads()) + 1
1903
1932
1904 def clone(self, remote, heads=[], stream=False):
1933 def clone(self, remote, heads=[], stream=False):
1905 '''clone remote repository.
1934 '''clone remote repository.
1906
1935
1907 keyword arguments:
1936 keyword arguments:
1908 heads: list of revs to clone (forces use of pull)
1937 heads: list of revs to clone (forces use of pull)
1909 stream: use streaming clone if possible'''
1938 stream: use streaming clone if possible'''
1910
1939
1911 # now, all clients that can request uncompressed clones can
1940 # now, all clients that can request uncompressed clones can
1912 # read repo formats supported by all servers that can serve
1941 # read repo formats supported by all servers that can serve
1913 # them.
1942 # them.
1914
1943
1915 # if revlog format changes, client will have to check version
1944 # if revlog format changes, client will have to check version
1916 # and format flags on "stream" capability, and use
1945 # and format flags on "stream" capability, and use
1917 # uncompressed only if compatible.
1946 # uncompressed only if compatible.
1918
1947
1919 if stream and not heads and remote.capable('stream'):
1948 if stream and not heads and remote.capable('stream'):
1920 return self.stream_in(remote)
1949 return self.stream_in(remote)
1921 return self.pull(remote, heads)
1950 return self.pull(remote, heads)
1922
1951
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the renames queued in *files*.

    *files* is an iterable of (src, dest) pairs; they are copied into
    plain tuples immediately so the closure holds no reference to the
    caller's (possibly transaction-owned) objects.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for src_path, dest_path in pending:
            util.rename(src_path, dest_path)
    return run_renames
1930
1959
def instance(ui, path, create):
    """Open (or, with create, initialize) a local repository at *path*."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1933
1962
def islocal(path):
    """file-scheme repositories are local by definition; *path* is unused."""
    return True
@@ -1,66 +1,86
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms
7 # This software may be used and distributed according to the terms
8 # of the GNU General Public License, incorporated herein by reference.
8 # of the GNU General Public License, incorporated herein by reference.
9
9
10 from demandload import *
10 from demandload import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 demandload(globals(), "changelog filelog httprangereader")
12 demandload(globals(), "changelog filelog httprangereader")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14
14
15 class rangereader(httprangereader.httprangereader):
15 class rangereader(httprangereader.httprangereader):
16 def read(self, size=None):
16 def read(self, size=None):
17 try:
17 try:
18 return httprangereader.httprangereader.read(self, size)
18 return httprangereader.httprangereader.read(self, size)
19 except urllib2.HTTPError, inst:
19 except urllib2.HTTPError, inst:
20 raise IOError(None, inst)
20 raise IOError(None, inst)
21 except urllib2.URLError, inst:
21 except urllib2.URLError, inst:
22 raise IOError(None, inst.reason[1])
22 raise IOError(None, inst.reason[1])
23
23
def opener(base):
    """return a function that opens files over http"""
    def o(path, mode="r"):
        # URL-quote the relative path and read it via range requests
        return rangereader("/".join((base, urllib.quote(path))))
    return o
31
31
class statichttprepository(localrepo.localrepository):
    """Read-only repository served as plain files over HTTP.

    Reuses localrepository logic but substitutes an opener that fetches
    files with HTTP range requests, so no server-side hg is required.
    """
    def __init__(self, ui, path):
        # remember the original URL for url()
        self._url = path
        self.ui = ui
        self.revlogversion = 0

        self.path = (path + "/.hg")
        self.opener = opener(self.path)
        # find requirements
        try:
            requirements = self.opener("requires").read().splitlines()
        except IOError:
            # no 'requires' file: an older repository with no special
            # requirements
            requirements = []
        # check them
        for r in requirements:
            if r not in self.supported:
                # NOTE(review): 'repo' is not in this module's demandload
                # list -- confirm it is in scope here
                raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            # new layout: revlogs live under .hg/store with encoded names
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = self.path + "/store"
        else:
            # legacy layout: revlogs directly under .hg, names unencoded
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        # store opener translates logical names through encodefn
        self.sopener = util.encodedopener(opener(self.spath), self.encodefn)

        self.manifest = manifest.manifest(self.sopener)
        self.changelog = changelog.changelog(self.sopener)
        # lazily-filled caches used by localrepository machinery
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None

    def url(self):
        return 'static-' + self._url

    def dev(self):
        # no local device for a remote repository
        return -1

    def local(self):
        return False
56
76
def instance(ui, path, create):
    """Open a static-http repository; creation is not supported."""
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    # strip the scheme decoration: 'old-' from old-http://,
    # 'static-' from static-http://, leaving a plain http:// URL
    if not path.startswith('old-http:'):
        stripped = path[7:]
    else:
        ui.warn(_("old-http:// syntax is deprecated, "
                  "please use static-http:// instead\n"))
        stripped = path[4:]
    return statichttprepository(ui, stripped)
@@ -1,95 +1,97
1 # streamclone.py - streaming clone server support for mercurial
1 # streamclone.py - streaming clone server support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from demandload import demandload
8 from demandload import demandload
9 from i18n import gettext as _
9 from i18n import gettext as _
10 demandload(globals(), "os stat util lock")
10 demandload(globals(), "os stat util lock")
11
11
12 # if server supports streaming clone, it advertises "stream"
12 # if server supports streaming clone, it advertises "stream"
13 # capability with value that is version+flags of repo it is serving.
13 # capability with value that is version+flags of repo it is serving.
14 # client only streams if it can read that repo format.
14 # client only streams if it can read that repo format.
15
15
def walkrepo(root):
    '''iterate over metadata files in repository.
    walk in natural (sorted) order.
    yields 2-tuples: name of .d or .i file, size of file.'''

    # length of the root prefix (plus separator) to strip from yielded names
    prefix_len = len(root) + len(os.sep)

    def scan(directory, descend):
        for entry in sorted(os.listdir(directory)):
            full = os.path.join(directory, entry)
            st = os.lstat(full)
            if stat.S_ISDIR(st.st_mode):
                if descend:
                    for item in scan(full, True):
                        yield item
                continue
            # only plain .d/.i revlog files are part of the stream
            if not stat.S_ISREG(st.st_mode) or len(entry) < 2:
                continue
            if entry[-2:] in ('.d', '.i'):
                yield full[prefix_len:], st.st_size

    # write file data first
    for item in scan(os.path.join(root, 'data'), True):
        yield item
    # write manifest before changelog
    meta = list(scan(root, False))
    meta.sort()
    meta.reverse()
    for item in meta:
        yield item
47
47
48 # stream file format is simple.
48 # stream file format is simple.
49 #
49 #
50 # server writes out line that says how many files, how many total
50 # server writes out line that says how many files, how many total
51 # bytes. separator is ascii space, byte counts are strings.
51 # bytes. separator is ascii space, byte counts are strings.
52 #
52 #
53 # then for each file:
53 # then for each file:
54 #
54 #
55 # server writes out line that says file name, how many bytes in
55 # server writes out line that says file name, how many bytes in
56 # file. separator is ascii nul, byte count is string.
56 # file. separator is ascii nul, byte count is string.
57 #
57 #
58 # server writes out raw file data.
58 # server writes out raw file data.
59
59
def stream_out(repo, fileobj):
    '''stream out all metadata files in repository.
    writes to file-like object, must support write() and optional flush().'''

    # status line 1: streaming disabled by server configuration
    if not repo.ui.configbool('server', 'uncompressed'):
        fileobj.write('1\n')
        return

    # get consistent snapshot of repo. lock during scan so lock not
    # needed while we stream, and commits can happen.
    try:
        repolock = repo.lock()
    except (lock.LockHeld, lock.LockUnavailable), inst:
        repo.ui.warn('locking the repository failed: %s\n' % (inst,))
        # status line 2: could not lock
        fileobj.write('2\n')
        return

    # status line 0: ok, stream follows
    fileobj.write('0\n')
    repo.ui.debug('scanning\n')
    entries = []
    total_bytes = 0
    for name, size in walkrepo(repo.spath):
        # the store keeps encoded filenames on disk; send the decoded
        # (logical) name so the client can re-encode for its own layout
        if repo.decodefn:
            name = repo.decodefn(name)
        entries.append((name, size))
        total_bytes += size
    repolock.release()

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))
    # header: "<file count> <total bytes>" (see format comment above)
    fileobj.write('%d %d\n' % (len(entries), total_bytes))
    for name, size in entries:
        repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
        # per-file header: "<name>\0<size>", then the raw bytes
        fileobj.write('%s\0%d\n' % (name, size))
        for chunk in util.filechunkiter(repo.sopener(name), limit=size):
            fileobj.write(chunk)
    flush = getattr(fileobj, 'flush', None)
    if flush: flush()
@@ -1,1290 +1,1322
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7
7
8 This software may be used and distributed according to the terms
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
9 of the GNU General Public License, incorporated herein by reference.
10
10
11 This contains helper routines that are independent of the SCM core and hide
11 This contains helper routines that are independent of the SCM core and hide
12 platform-specific details from the core.
12 platform-specific details from the core.
13 """
13 """
14
14
15 from i18n import gettext as _
15 from i18n import gettext as _
16 from demandload import *
16 from demandload import *
17 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
17 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
18 demandload(globals(), "os threading time calendar ConfigParser locale")
18 demandload(globals(), "os threading time calendar ConfigParser locale")
19
19
# local character encoding: HGENCODING overrides the locale's preferred
# encoding, falling back to ascii as a last resort
_encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding() \
            or "ascii"
# how undecodable bytes are handled ('strict', 'replace' or 'ignore')
_encodingmode = os.environ.get("HGENCODINGMODE", "strict")
# legacy charset tried by tolocal() for pre-locale-support repositories
_fallbackencoding = 'ISO-8859-1'
24
24
25 def tolocal(s):
25 def tolocal(s):
26 """
26 """
27 Convert a string from internal UTF-8 to local encoding
27 Convert a string from internal UTF-8 to local encoding
28
28
29 All internal strings should be UTF-8 but some repos before the
29 All internal strings should be UTF-8 but some repos before the
30 implementation of locale support may contain latin1 or possibly
30 implementation of locale support may contain latin1 or possibly
31 other character sets. We attempt to decode everything strictly
31 other character sets. We attempt to decode everything strictly
32 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
32 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
33 replace unknown characters.
33 replace unknown characters.
34 """
34 """
35 for e in ('UTF-8', _fallbackencoding):
35 for e in ('UTF-8', _fallbackencoding):
36 try:
36 try:
37 u = s.decode(e) # attempt strict decoding
37 u = s.decode(e) # attempt strict decoding
38 return u.encode(_encoding, "replace")
38 return u.encode(_encoding, "replace")
39 except LookupError, k:
39 except LookupError, k:
40 raise Abort(_("%s, please check your locale settings") % k)
40 raise Abort(_("%s, please check your locale settings") % k)
41 except UnicodeDecodeError:
41 except UnicodeDecodeError:
42 pass
42 pass
43 u = s.decode("utf-8", "replace") # last ditch
43 u = s.decode("utf-8", "replace") # last ditch
44 return u.encode(_encoding, "replace")
44 return u.encode(_encoding, "replace")
45
45
46 def fromlocal(s):
46 def fromlocal(s):
47 """
47 """
48 Convert a string from the local character encoding to UTF-8
48 Convert a string from the local character encoding to UTF-8
49
49
50 We attempt to decode strings using the encoding mode set by
50 We attempt to decode strings using the encoding mode set by
51 HG_ENCODINGMODE, which defaults to 'strict'. In this mode, unknown
51 HG_ENCODINGMODE, which defaults to 'strict'. In this mode, unknown
52 characters will cause an error message. Other modes include
52 characters will cause an error message. Other modes include
53 'replace', which replaces unknown characters with a special
53 'replace', which replaces unknown characters with a special
54 Unicode character, and 'ignore', which drops the character.
54 Unicode character, and 'ignore', which drops the character.
55 """
55 """
56 try:
56 try:
57 return s.decode(_encoding, _encodingmode).encode("utf-8")
57 return s.decode(_encoding, _encodingmode).encode("utf-8")
58 except UnicodeDecodeError, inst:
58 except UnicodeDecodeError, inst:
59 sub = s[max(0, inst.start-10):inst.start+10]
59 sub = s[max(0, inst.start-10):inst.start+10]
60 raise Abort("decoding near '%s': %s!" % (sub, inst))
60 raise Abort("decoding near '%s': %s!" % (sub, inst))
61 except LookupError, k:
61 except LookupError, k:
62 raise Abort(_("%s, please check your locale settings") % k)
62 raise Abort(_("%s, please check your locale settings") % k)
63
63
def locallen(s):
    """Find the length in characters of a local string"""
    # decode first (replacing bad bytes) so multi-byte sequences count
    # as single characters
    decoded = s.decode(_encoding, "replace")
    return len(decoded)
67
67
68 def localsub(s, a, b=None):
68 def localsub(s, a, b=None):
69 try:
69 try:
70 u = s.decode(_encoding, _encodingmode)
70 u = s.decode(_encoding, _encodingmode)
71 if b is not None:
71 if b is not None:
72 u = u[a:b]
72 u = u[a:b]
73 else:
73 else:
74 u = u[:a]
74 u = u[:a]
75 return u.encode(_encoding, _encodingmode)
75 return u.encode(_encoding, _encodingmode)
76 except UnicodeDecodeError, inst:
76 except UnicodeDecodeError, inst:
77 sub = s[max(0, inst.start-10), inst.start+10]
77 sub = s[max(0, inst.start-10), inst.start+10]
78 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
78 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
79
79
# used by parsedate
# candidate strptime() formats, tried in order; most specific first
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%SP',  # NOTE(review): trailing 'P' looks like a typo for '%p' -- confirm
    '%H:%M',
    '%I:%M%p',
    )

# additional, coarser formats accepted where a date range is meaningful
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
113
113
# NOTE(review): presumably raised from SIGTERM/SIGHUP handlers installed
# elsewhere -- confirm against the signal setup code
class SignalInterrupt(Exception):
    """Exception raised on SIGTERM and SIGHUP."""
116
116
# like SafeConfigParser but with case-sensitive keys
class configparser(ConfigParser.SafeConfigParser):
    def optionxform(self, optionstr):
        # the base class lower-cases option names here; returning the
        # string unchanged keeps keys case-sensitive
        return optionstr
121
121
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    memo = {}
    if func.func_code.co_argcount == 1:
        # single-argument functions use the argument itself as the cache
        # key, avoiding tuple packing/unpacking
        def f(arg):
            try:
                return memo[arg]
            except KeyError:
                memo[arg] = func(arg)
                return memo[arg]
    else:
        def f(*args):
            try:
                return memo[args]
            except KeyError:
                memo[args] = func(*args)
                return memo[args]
    return f
140
140
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # bidirectional pipes to the child process, binary mode
    (pout, pin) = popen2.popen2(cmd, -1, 'b')
    def writer():
        try:
            pin.write(s)
            pin.close()
        except IOError, inst:
            # EPIPE just means the command stopped reading its input
            # early; anything else is a real error
            if inst.errno != errno.EPIPE:
                raise

    # we should use select instead on UNIX, but this will work on most
    # systems, including Windows
    w = threading.Thread(target=writer)
    w.start()
    f = pout.read()
    pout.close()
    w.join()
    return f
160
160
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        # splice the actual temp file names into the command template
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if code: raise Abort(_("command '%s' failed: %s") %
                             (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        # best-effort cleanup of both temp files; errors are ignored
        # deliberately (the filter result matters more than the litter)
        try:
            if inname: os.unlink(inname)
        except: pass
        try:
            if outname: os.unlink(outname)
        except: pass
187
187
# maps a filter-command prefix to the function implementing it;
# commands without a recognized prefix go through pipefilter (see filter())
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
192
192
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a recognized prefix; default is a plain pipe filter
    for prefix, fn in filtertable.items():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
199
199
def find_in_path(name, path, default=None):
    '''find name in search path. path can be string (will be split
    with os.pathsep), or iterable thing that returns strings. if name
    found, return path to name. else return default.'''
    if isinstance(path, str):
        path = path.split(os.pathsep)
    for directory in path:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return default
211
211
def binary(s):
    """return true if a string is binary data using diff's heuristic"""
    # a NUL byte within the first 4k is taken as evidence of binary data
    return bool(s) and '\0' in s[:4096]
217
217
def unique(g):
    """return the uniq elements of iterable g"""
    # first occurrence wins; original order is preserved
    seen = {}
    out = []
    for item in g:
        if item in seen:
            continue
        seen[item] = 1
        out.append(item)
    return out
227
227
228 class Abort(Exception):
228 class Abort(Exception):
229 """Raised if a command needs to print an error and exit."""
229 """Raised if a command needs to print an error and exit."""
230
230
# subclasses Abort, so handlers catching Abort also handle this
class UnexpectedOutput(Abort):
    """Raised to print an error with part of output and exit."""
233
233
def always(fn):
    """constant-True predicate: matches any file name"""
    return True
def never(fn):
    """constant-False predicate: matches no file name"""
    return False
236
236
def patkind(name, dflt_pat='glob'):
    """Split a string into an optional pattern kind prefix and the
    actual pattern."""
    # a recognized 'kind:' prefix selects the kind explicitly;
    # otherwise dflt_pat applies
    for kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre'):
        if name.startswith(kind + ':'):
            return name.split(':', 1)
    return dflt_pat, name
243
243
def globre(pat, head='^', tail='$'):
    "convert a glob pattern into a regexp"
    n = len(pat)
    i = 0
    res = ''
    group = False
    while i < n:
        c = pat[i]
        i += 1
        if c == '*':
            if i < n and pat[i] == '*':
                # '**' may cross directory boundaries, '*' may not
                i += 1
                res += '.*'
            else:
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            j = i
            # a '!' or ']' right after '[' is part of the class
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' as a literal
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation -> regex negation
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            group = True
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group = False
        elif c == ',' and group:
            res += '|'
        elif c == '\\':
            # backslash escapes the next character, if any
            if i < n:
                nxt = pat[i]
                i += 1
                res += re.escape(nxt)
            else:
                res += re.escape(c)
        else:
            res += re.escape(c)
    return head + res + tail
295
295
# characters whose presence makes a pattern a glob rather than a literal
_globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
297
297
def pathto(n1, n2):
    '''return the relative path from one place to another.
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.
    '''
    if not n1:
        return localpath(n2)
    a = n1.split(os.sep)
    b = n2.split('/')
    # reverse both so the shared leading components sit at the tail,
    # then pop the common prefix off
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into b
    return os.sep.join(['..'] * len(a) + b)
313
313
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root

    the result is relative to root, checked by audit_path and passed
    through pconvert; raises Abort if myname is not under root.
    """
    # compute root with a guaranteed trailing separator
    if root == os.sep:
        rootsep = os.sep
    elif root.endswith(os.sep):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        # relative names are interpreted from cwd inside root
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if name != rootsep and name.startswith(rootsep):
        # easy case: textual prefix match against root
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without matching
                break
            name = dirname

        raise Abort('%s not under root' % myname)
358
358
def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
    # convenience wrapper around _matcher: unprefixed patterns are globs
    return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
361
361
def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
    # matcher for command-line arguments: unprefixed patterns default
    # to globs on Windows and to relative paths elsewhere
    if os.name == 'nt':
        dflt_pat = 'glob'
    else:
        dflt_pat = 'relpath'
    return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
368
368
def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
    """build a function to match a set of file patterns

    arguments:
    canonroot - the canonical root of the tree you're matching against
    cwd - the current working directory, if relevant
    names - patterns to find
    inc - patterns to include
    exc - patterns to exclude
    head - a regex to prepend to patterns to control whether a match is rooted
    dflt_pat - pattern kind assumed for names without a 'kind:' prefix
    src - where the patterns came from, used in error messages

    a pattern is one of:
    'glob:<rooted glob>'
    're:<rooted regexp>'
    'path:<rooted path>'
    'relglob:<relative glob>'
    'relpath:<relative path>'
    'relre:<relative regexp>'
    '<rooted path or regexp>'

    returns:
    a 3-tuple containing
    - list of explicit non-pattern names passed in
    - a bool match(filename) function
    - a bool indicating if any patterns were passed in

    todo:
    make head regex a rooted bool
    """

    def contains_glob(name):
        # true if any glob metacharacter appears in name
        for c in name:
            if c in _globchars: return True
        return False

    def regex(kind, name, tail):
        '''convert a pattern into a regular expression'''
        if kind == 're':
            return name
        elif kind == 'path':
            # the exact path, or anything beneath it
            return '^' + re.escape(name) + '(?:/|$)'
        elif kind == 'relglob':
            # glob that may match at any directory depth
            return head + globre(name, '(?:|.*/)', tail)
        elif kind == 'relpath':
            return head + re.escape(name) + tail
        elif kind == 'relre':
            if name.startswith('^'):
                return name
            return '.*' + name
        return head + globre(name, '', tail)

    def matchfn(pats, tail):
        """build a matching function from a set of patterns"""
        if not pats:
            return
        matches = []
        for k, p in pats:
            try:
                pat = '(?:%s)' % regex(k, p, tail)
                matches.append(re.compile(pat).match)
            except re.error:
                if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
                else: raise Abort("invalid pattern (%s): %s" % (k, p))

        def buildfn(text):
            # first successful match wins
            for m in matches:
                r = m(text)
                if r:
                    return r

        return buildfn

    def globprefix(pat):
        '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
        root = []
        for p in pat.split(os.sep):
            if contains_glob(p): break
            root.append(p)
        return '/'.join(root)

    # sort the incoming names into regex-style patterns, literal files,
    # and the explicit directory roots they imply
    pats = []
    files = []
    roots = []
    for kind, name in [patkind(p, dflt_pat) for p in names]:
        if kind in ('glob', 'relpath'):
            name = canonpath(canonroot, cwd, name)
            if name == '':
                # the root itself: match everything
                kind, name = 'glob', '**'
        if kind in ('glob', 'path', 're'):
            pats.append((kind, name))
            if kind == 'glob':
                root = globprefix(name)
                if root: roots.append(root)
        elif kind == 'relpath':
            files.append((kind, name))
            roots.append(name)

    patmatch = matchfn(pats, '$') or always
    filematch = matchfn(files, '(?:/|$)') or always
    incmatch = always
    if inc:
        inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
        incmatch = matchfn(inckinds, '(?:/|$)')
    excmatch = lambda fn: False
    if exc:
        exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
        excmatch = matchfn(exckinds, '(?:/|$)')

    return (roots,
            lambda fn: (incmatch(fn) and not excmatch(fn) and
                        (fn.endswith('/') or
                         (not pats and not files) or
                         (pats and patmatch(fn)) or
                         (files and filematch(fn)))),
            (inc or exc or (pats and pats != [('glob', '**')])) and True)
484
484
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.'''
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val in (None, False):
            return '0'
        if val == True:
            return '1'
        return str(val)
    # remember the environment values we are about to override so they
    # can be restored afterwards
    oldenv = {}
    for k in environ:
        oldenv[k] = os.environ.get(k)
    if cwd is not None:
        oldcwd = os.getcwd()
    try:
        for k, v in environ.iteritems():
            os.environ[k] = py2shell(v)
        if cwd is not None and oldcwd != cwd:
            os.chdir(cwd)
        rc = os.system(cmd)
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
                                explain_exit(rc)[0])
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            try:
                # a ui-like object has warn(); anything else is treated
                # as an exception class to raise
                onerr.warn(errmsg + '\n')
            except AttributeError:
                raise onerr(errmsg)
        return rc
    finally:
        # restore the environment and working directory
        for k, v in oldenv.iteritems():
            if v is None:
                del os.environ[k]
            else:
                os.environ[k] = v
        if cwd is not None and oldcwd != cwd:
            os.chdir(oldcwd)
528
528
def rename(src, dst):
    """forcibly rename a file

    replaces an existing dst even where the OS forbids it directly
    (see the comment below for the Windows workaround)"""
    try:
        os.rename(src, dst)
    except OSError, err:
        # on windows, rename to existing file is not allowed, so we
        # must delete destination first. but if file is open, unlink
        # schedules it for delete but does not delete it. rename
        # happens immediately even for open files, so we create
        # temporary file, delete it, rename destination to that name,
        # then delete that. then rename is safe to do.
        fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
        os.close(fd)
        os.unlink(temp)
        os.rename(dst, temp)
        os.unlink(temp)
        os.rename(src, dst)
546
546
def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    parent = os.path.dirname(f)
    # opportunistically prune directories left empty by the removal;
    # a non-empty (or otherwise unremovable) directory is fine
    try:
        os.removedirs(parent)
    except OSError:
        pass
555
555
556 def copyfile(src, dest):
556 def copyfile(src, dest):
557 "copy a file, preserving mode"
557 "copy a file, preserving mode"
558 try:
558 try:
559 shutil.copyfile(src, dest)
559 shutil.copyfile(src, dest)
560 shutil.copymode(src, dest)
560 shutil.copymode(src, dest)
561 except shutil.Error, inst:
561 except shutil.Error, inst:
562 raise util.Abort(str(inst))
562 raise util.Abort(str(inst))
563
563
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        # only try hardlinks when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    if not os.path.isdir(src):
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # link failed: fall back to plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
    else:
        os.mkdir(dst)
        for name in os.listdir(src):
            copyfiles(os.path.join(src, name),
                      os.path.join(dst, name), hardlink)
586
586
def audit_path(path):
    """Abort if path contains dangerous components"""
    parts = os.path.normcase(path).split(os.sep)
    # reject drive-qualified paths, absolute/.hg-rooted paths, and any
    # '..' component that could escape the tree
    unsafe = (os.path.splitdrive(path)[0]
              or parts[0] in ('.hg', '')
              or os.pardir in parts)
    if unsafe:
        raise Abort(_("path contains illegal component: %s\n") % path)
593
593
594 def _makelock_file(info, pathname):
594 def _makelock_file(info, pathname):
595 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
595 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
596 os.write(ld, info)
596 os.write(ld, info)
597 os.close(ld)
597 os.close(ld)
598
598
def _readlock_file(pathname):
    # return the lock file's contents (written by _makelock_file)
    return posixfile(pathname).read()
601
601
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    # lstat so a symlink reports its own link count, not its target's
    st = os.lstat(pathname)
    return st.st_nlink
605
605
# use the real os.link where the platform provides one; otherwise
# install a stub that always fails with a clear message
if hasattr(os, 'link'):
    os_link = os.link
else:
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))
611
611
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        # real files: stat by descriptor
        return os.fstat(fp.fileno())
    except AttributeError:
        # no fileno(): fall back to the path stored in .name
        return os.stat(fp.name)
618
618
# default file implementation: the builtin file type
# NOTE(review): platform-specific code later in this file may rebind
# this for Windows - confirm against the os.name == 'nt' section
posixfile = file
620
620
def is_win_9x():
    '''return true if run on windows 95, 98 or me.'''
    getver = getattr(sys, 'getwindowsversion', None)
    if getver is not None:
        # platform id 1 identifies the 9x product line
        return getver()[3] == 1
    # no getwindowsversion available: guess from the shell in COMSPEC
    return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
627
627
# optional hook: a zero-argument function returning the user name,
# used when getpass.getuser cannot work (see getuser below)
getuser_fallback = None

def getuser():
    '''return name of current user'''
    try:
        return getpass.getuser()
    except ImportError:
        # import of pwd will fail on windows - try fallback
        if getuser_fallback:
            return getuser_fallback()
        # raised if win32api not available
        raise Abort(_('user name not available - set USERNAME '
                      'environment variable'))
641
641
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    try:
        import pwd
    except ImportError:
        # no pwd module (e.g. Windows): name unknown
        return None
    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid)[0]
    except KeyError:
        # unknown uid: fall back to its numeric form
        return str(uid)
656
656
def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    try:
        import grp
    except ImportError:
        # no grp module (e.g. Windows): name unknown
        return None
    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid)[0]
    except KeyError:
        # unknown gid: fall back to its numeric form
        return str(gid)
671
671
672 # File system features
672 # File system features
673
673
def checkfolding(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    # build a sibling name differing only in case
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            # same stat result: the folded name is the same file,
            # so the filesystem folds case
            return False
        return True
    except OSError:
        # folded sibling does not exist -> case-sensitive filesystem.
        # (was a bare 'except:', which also swallowed KeyboardInterrupt
        # and SystemExit; os.stat failures are OSError)
        return True
693
693
694 # Platform specific variants
694 # Platform specific variants
695 if os.name == 'nt':
695 if os.name == 'nt':
696 demandload(globals(), "msvcrt")
696 demandload(globals(), "msvcrt")
697 nulldev = 'NUL:'
697 nulldev = 'NUL:'
698
698
699 class winstdout:
699 class winstdout:
700 '''stdout on windows misbehaves if sent through a pipe'''
700 '''stdout on windows misbehaves if sent through a pipe'''
701
701
702 def __init__(self, fp):
702 def __init__(self, fp):
703 self.fp = fp
703 self.fp = fp
704
704
705 def __getattr__(self, key):
705 def __getattr__(self, key):
706 return getattr(self.fp, key)
706 return getattr(self.fp, key)
707
707
708 def close(self):
708 def close(self):
709 try:
709 try:
710 self.fp.close()
710 self.fp.close()
711 except: pass
711 except: pass
712
712
713 def write(self, s):
713 def write(self, s):
714 try:
714 try:
715 return self.fp.write(s)
715 return self.fp.write(s)
716 except IOError, inst:
716 except IOError, inst:
717 if inst.errno != 0: raise
717 if inst.errno != 0: raise
718 self.close()
718 self.close()
719 raise IOError(errno.EPIPE, 'Broken pipe')
719 raise IOError(errno.EPIPE, 'Broken pipe')
720
720
721 sys.stdout = winstdout(sys.stdout)
721 sys.stdout = winstdout(sys.stdout)
722
722
723 def system_rcpath():
723 def system_rcpath():
724 try:
724 try:
725 return system_rcpath_win32()
725 return system_rcpath_win32()
726 except:
726 except:
727 return [r'c:\mercurial\mercurial.ini']
727 return [r'c:\mercurial\mercurial.ini']
728
728
729 def os_rcpath():
729 def os_rcpath():
730 '''return default os-specific hgrc search path'''
730 '''return default os-specific hgrc search path'''
731 path = system_rcpath()
731 path = system_rcpath()
732 path.append(user_rcpath())
732 path.append(user_rcpath())
733 userprofile = os.environ.get('USERPROFILE')
733 userprofile = os.environ.get('USERPROFILE')
734 if userprofile:
734 if userprofile:
735 path.append(os.path.join(userprofile, 'mercurial.ini'))
735 path.append(os.path.join(userprofile, 'mercurial.ini'))
736 return path
736 return path
737
737
738 def user_rcpath():
738 def user_rcpath():
739 '''return os-specific hgrc search path to the user dir'''
739 '''return os-specific hgrc search path to the user dir'''
740 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
740 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
741
741
742 def parse_patch_output(output_line):
742 def parse_patch_output(output_line):
743 """parses the output produced by patch and returns the file name"""
743 """parses the output produced by patch and returns the file name"""
744 pf = output_line[14:]
744 pf = output_line[14:]
745 if pf[0] == '`':
745 if pf[0] == '`':
746 pf = pf[1:-1] # Remove the quotes
746 pf = pf[1:-1] # Remove the quotes
747 return pf
747 return pf
748
748
749 def testpid(pid):
749 def testpid(pid):
750 '''return False if pid dead, True if running or not known'''
750 '''return False if pid dead, True if running or not known'''
751 return True
751 return True
752
752
753 def is_exec(f, last):
753 def is_exec(f, last):
754 return last
754 return last
755
755
756 def set_exec(f, mode):
756 def set_exec(f, mode):
757 pass
757 pass
758
758
759 def set_binary(fd):
759 def set_binary(fd):
760 msvcrt.setmode(fd.fileno(), os.O_BINARY)
760 msvcrt.setmode(fd.fileno(), os.O_BINARY)
761
761
762 def pconvert(path):
762 def pconvert(path):
763 return path.replace("\\", "/")
763 return path.replace("\\", "/")
764
764
765 def localpath(path):
765 def localpath(path):
766 return path.replace('/', '\\')
766 return path.replace('/', '\\')
767
767
768 def normpath(path):
768 def normpath(path):
769 return pconvert(os.path.normpath(path))
769 return pconvert(os.path.normpath(path))
770
770
771 makelock = _makelock_file
771 makelock = _makelock_file
772 readlock = _readlock_file
772 readlock = _readlock_file
773
773
774 def samestat(s1, s2):
774 def samestat(s1, s2):
775 return False
775 return False
776
776
777 def shellquote(s):
777 def shellquote(s):
778 return '"%s"' % s.replace('"', '\\"')
778 return '"%s"' % s.replace('"', '\\"')
779
779
780 def explain_exit(code):
780 def explain_exit(code):
781 return _("exited with status %d") % code, code
781 return _("exited with status %d") % code, code
782
782
783 # if you change this stub into a real check, please try to implement the
783 # if you change this stub into a real check, please try to implement the
784 # username and groupname functions above, too.
784 # username and groupname functions above, too.
785 def isowner(fp, st=None):
785 def isowner(fp, st=None):
786 return True
786 return True
787
787
788 try:
788 try:
789 # override functions with win32 versions if possible
789 # override functions with win32 versions if possible
790 from util_win32 import *
790 from util_win32 import *
791 if not is_win_9x():
791 if not is_win_9x():
792 posixfile = posixfile_nt
792 posixfile = posixfile_nt
793 except ImportError:
793 except ImportError:
794 pass
794 pass
795
795
796 else:
796 else:
797 nulldev = '/dev/null'
797 nulldev = '/dev/null'
798
798
799 def rcfiles(path):
799 def rcfiles(path):
800 rcs = [os.path.join(path, 'hgrc')]
800 rcs = [os.path.join(path, 'hgrc')]
801 rcdir = os.path.join(path, 'hgrc.d')
801 rcdir = os.path.join(path, 'hgrc.d')
802 try:
802 try:
803 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
803 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
804 if f.endswith(".rc")])
804 if f.endswith(".rc")])
805 except OSError:
805 except OSError:
806 pass
806 pass
807 return rcs
807 return rcs
808
808
809 def os_rcpath():
809 def os_rcpath():
810 '''return default os-specific hgrc search path'''
810 '''return default os-specific hgrc search path'''
811 path = []
811 path = []
812 # old mod_python does not set sys.argv
812 # old mod_python does not set sys.argv
813 if len(getattr(sys, 'argv', [])) > 0:
813 if len(getattr(sys, 'argv', [])) > 0:
814 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
814 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
815 '/../etc/mercurial'))
815 '/../etc/mercurial'))
816 path.extend(rcfiles('/etc/mercurial'))
816 path.extend(rcfiles('/etc/mercurial'))
817 path.append(os.path.expanduser('~/.hgrc'))
817 path.append(os.path.expanduser('~/.hgrc'))
818 path = [os.path.normpath(f) for f in path]
818 path = [os.path.normpath(f) for f in path]
819 return path
819 return path
820
820
821 def parse_patch_output(output_line):
821 def parse_patch_output(output_line):
822 """parses the output produced by patch and returns the file name"""
822 """parses the output produced by patch and returns the file name"""
823 pf = output_line[14:]
823 pf = output_line[14:]
824 if pf.startswith("'") and pf.endswith("'") and " " in pf:
824 if pf.startswith("'") and pf.endswith("'") and " " in pf:
825 pf = pf[1:-1] # Remove the quotes
825 pf = pf[1:-1] # Remove the quotes
826 return pf
826 return pf
827
827
828 def is_exec(f, last):
828 def is_exec(f, last):
829 """check whether a file is executable"""
829 """check whether a file is executable"""
830 return (os.lstat(f).st_mode & 0100 != 0)
830 return (os.lstat(f).st_mode & 0100 != 0)
831
831
832 def set_exec(f, mode):
832 def set_exec(f, mode):
833 s = os.lstat(f).st_mode
833 s = os.lstat(f).st_mode
834 if (s & 0100 != 0) == mode:
834 if (s & 0100 != 0) == mode:
835 return
835 return
836 if mode:
836 if mode:
837 # Turn on +x for every +r bit when making a file executable
837 # Turn on +x for every +r bit when making a file executable
838 # and obey umask.
838 # and obey umask.
839 umask = os.umask(0)
839 umask = os.umask(0)
840 os.umask(umask)
840 os.umask(umask)
841 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
841 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
842 else:
842 else:
843 os.chmod(f, s & 0666)
843 os.chmod(f, s & 0666)
844
844
845 def set_binary(fd):
845 def set_binary(fd):
846 pass
846 pass
847
847
848 def pconvert(path):
848 def pconvert(path):
849 return path
849 return path
850
850
851 def localpath(path):
851 def localpath(path):
852 return path
852 return path
853
853
854 normpath = os.path.normpath
854 normpath = os.path.normpath
855 samestat = os.path.samestat
855 samestat = os.path.samestat
856
856
857 def makelock(info, pathname):
857 def makelock(info, pathname):
858 try:
858 try:
859 os.symlink(info, pathname)
859 os.symlink(info, pathname)
860 except OSError, why:
860 except OSError, why:
861 if why.errno == errno.EEXIST:
861 if why.errno == errno.EEXIST:
862 raise
862 raise
863 else:
863 else:
864 _makelock_file(info, pathname)
864 _makelock_file(info, pathname)
865
865
866 def readlock(pathname):
866 def readlock(pathname):
867 try:
867 try:
868 return os.readlink(pathname)
868 return os.readlink(pathname)
869 except OSError, why:
869 except OSError, why:
870 if why.errno == errno.EINVAL:
870 if why.errno == errno.EINVAL:
871 return _readlock_file(pathname)
871 return _readlock_file(pathname)
872 else:
872 else:
873 raise
873 raise
874
874
875 def shellquote(s):
875 def shellquote(s):
876 return "'%s'" % s.replace("'", "'\\''")
876 return "'%s'" % s.replace("'", "'\\''")
877
877
878 def testpid(pid):
878 def testpid(pid):
879 '''return False if pid dead, True if running or not sure'''
879 '''return False if pid dead, True if running or not sure'''
880 try:
880 try:
881 os.kill(pid, 0)
881 os.kill(pid, 0)
882 return True
882 return True
883 except OSError, inst:
883 except OSError, inst:
884 return inst.errno != errno.ESRCH
884 return inst.errno != errno.ESRCH
885
885
886 def explain_exit(code):
886 def explain_exit(code):
887 """return a 2-tuple (desc, code) describing a process's status"""
887 """return a 2-tuple (desc, code) describing a process's status"""
888 if os.WIFEXITED(code):
888 if os.WIFEXITED(code):
889 val = os.WEXITSTATUS(code)
889 val = os.WEXITSTATUS(code)
890 return _("exited with status %d") % val, val
890 return _("exited with status %d") % val, val
891 elif os.WIFSIGNALED(code):
891 elif os.WIFSIGNALED(code):
892 val = os.WTERMSIG(code)
892 val = os.WTERMSIG(code)
893 return _("killed by signal %d") % val, val
893 return _("killed by signal %d") % val, val
894 elif os.WIFSTOPPED(code):
894 elif os.WIFSTOPPED(code):
895 val = os.WSTOPSIG(code)
895 val = os.WSTOPSIG(code)
896 return _("stopped by signal %d") % val, val
896 return _("stopped by signal %d") % val, val
897 raise ValueError(_("invalid exit code"))
897 raise ValueError(_("invalid exit code"))
898
898
899 def isowner(fp, st=None):
899 def isowner(fp, st=None):
900 """Return True if the file object f belongs to the current user.
900 """Return True if the file object f belongs to the current user.
901
901
902 The return value of a util.fstat(f) may be passed as the st argument.
902 The return value of a util.fstat(f) may be passed as the st argument.
903 """
903 """
904 if st is None:
904 if st is None:
905 st = fstat(f)
905 st = fstat(f)
906 return st.st_uid == os.getuid()
906 return st.st_uid == os.getuid()
907
907
908 def _buildencodefun():
909 e = '_'
910 win_reserved = [ord(x) for x in '|\?*<":>+[]']
911 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
912 for x in (range(32) + range(126, 256) + win_reserved):
913 cmap[chr(x)] = "~%02x" % x
914 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
915 cmap[chr(x)] = e + chr(x).lower()
916 dmap = {}
917 for k, v in cmap.iteritems():
918 dmap[v] = k
919 def decode(s):
920 i = 0
921 while i < len(s):
922 for l in xrange(1, 4):
923 try:
924 yield dmap[s[i:i+l]]
925 i += l
926 break
927 except KeyError:
928 pass
929 else:
930 raise KeyError
931 return (lambda s: "".join([cmap[c] for c in s]),
932 lambda s: "".join(list(decode(s))))
933
934 encodefilename, decodefilename = _buildencodefun()
935
936 def encodedopener(openerfn, fn):
937 def o(path, *args, **kw):
938 return openerfn(fn(path), *args, **kw)
939 return o
908
940
909 def opener(base, audit=True):
941 def opener(base, audit=True):
910 """
942 """
911 return a function that opens files relative to base
943 return a function that opens files relative to base
912
944
913 this function is used to hide the details of COW semantics and
945 this function is used to hide the details of COW semantics and
914 remote file access from higher level code.
946 remote file access from higher level code.
915 """
947 """
916 p = base
948 p = base
917 audit_p = audit
949 audit_p = audit
918
950
919 def mktempcopy(name):
951 def mktempcopy(name):
920 d, fn = os.path.split(name)
952 d, fn = os.path.split(name)
921 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
953 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
922 os.close(fd)
954 os.close(fd)
923 ofp = posixfile(temp, "wb")
955 ofp = posixfile(temp, "wb")
924 try:
956 try:
925 try:
957 try:
926 ifp = posixfile(name, "rb")
958 ifp = posixfile(name, "rb")
927 except IOError, inst:
959 except IOError, inst:
928 if not getattr(inst, 'filename', None):
960 if not getattr(inst, 'filename', None):
929 inst.filename = name
961 inst.filename = name
930 raise
962 raise
931 for chunk in filechunkiter(ifp):
963 for chunk in filechunkiter(ifp):
932 ofp.write(chunk)
964 ofp.write(chunk)
933 ifp.close()
965 ifp.close()
934 ofp.close()
966 ofp.close()
935 except:
967 except:
936 try: os.unlink(temp)
968 try: os.unlink(temp)
937 except: pass
969 except: pass
938 raise
970 raise
939 st = os.lstat(name)
971 st = os.lstat(name)
940 os.chmod(temp, st.st_mode)
972 os.chmod(temp, st.st_mode)
941 return temp
973 return temp
942
974
943 class atomictempfile(posixfile):
975 class atomictempfile(posixfile):
944 """the file will only be copied when rename is called"""
976 """the file will only be copied when rename is called"""
945 def __init__(self, name, mode):
977 def __init__(self, name, mode):
946 self.__name = name
978 self.__name = name
947 self.temp = mktempcopy(name)
979 self.temp = mktempcopy(name)
948 posixfile.__init__(self, self.temp, mode)
980 posixfile.__init__(self, self.temp, mode)
949 def rename(self):
981 def rename(self):
950 if not self.closed:
982 if not self.closed:
951 posixfile.close(self)
983 posixfile.close(self)
952 rename(self.temp, localpath(self.__name))
984 rename(self.temp, localpath(self.__name))
953 def __del__(self):
985 def __del__(self):
954 if not self.closed:
986 if not self.closed:
955 try:
987 try:
956 os.unlink(self.temp)
988 os.unlink(self.temp)
957 except: pass
989 except: pass
958 posixfile.close(self)
990 posixfile.close(self)
959
991
960 class atomicfile(atomictempfile):
992 class atomicfile(atomictempfile):
961 """the file will only be copied on close"""
993 """the file will only be copied on close"""
962 def __init__(self, name, mode):
994 def __init__(self, name, mode):
963 atomictempfile.__init__(self, name, mode)
995 atomictempfile.__init__(self, name, mode)
964 def close(self):
996 def close(self):
965 self.rename()
997 self.rename()
966 def __del__(self):
998 def __del__(self):
967 self.rename()
999 self.rename()
968
1000
969 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
1001 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
970 if audit_p:
1002 if audit_p:
971 audit_path(path)
1003 audit_path(path)
972 f = os.path.join(p, path)
1004 f = os.path.join(p, path)
973
1005
974 if not text:
1006 if not text:
975 mode += "b" # for that other OS
1007 mode += "b" # for that other OS
976
1008
977 if mode[0] != "r":
1009 if mode[0] != "r":
978 try:
1010 try:
979 nlink = nlinks(f)
1011 nlink = nlinks(f)
980 except OSError:
1012 except OSError:
981 d = os.path.dirname(f)
1013 d = os.path.dirname(f)
982 if not os.path.isdir(d):
1014 if not os.path.isdir(d):
983 os.makedirs(d)
1015 os.makedirs(d)
984 else:
1016 else:
985 if atomic:
1017 if atomic:
986 return atomicfile(f, mode)
1018 return atomicfile(f, mode)
987 elif atomictemp:
1019 elif atomictemp:
988 return atomictempfile(f, mode)
1020 return atomictempfile(f, mode)
989 if nlink > 1:
1021 if nlink > 1:
990 rename(mktempcopy(f), f)
1022 rename(mktempcopy(f), f)
991 return posixfile(f, mode)
1023 return posixfile(f, mode)
992
1024
993 return o
1025 return o
994
1026
995 class chunkbuffer(object):
1027 class chunkbuffer(object):
996 """Allow arbitrary sized chunks of data to be efficiently read from an
1028 """Allow arbitrary sized chunks of data to be efficiently read from an
997 iterator over chunks of arbitrary size."""
1029 iterator over chunks of arbitrary size."""
998
1030
999 def __init__(self, in_iter, targetsize = 2**16):
1031 def __init__(self, in_iter, targetsize = 2**16):
1000 """in_iter is the iterator that's iterating over the input chunks.
1032 """in_iter is the iterator that's iterating over the input chunks.
1001 targetsize is how big a buffer to try to maintain."""
1033 targetsize is how big a buffer to try to maintain."""
1002 self.in_iter = iter(in_iter)
1034 self.in_iter = iter(in_iter)
1003 self.buf = ''
1035 self.buf = ''
1004 self.targetsize = int(targetsize)
1036 self.targetsize = int(targetsize)
1005 if self.targetsize <= 0:
1037 if self.targetsize <= 0:
1006 raise ValueError(_("targetsize must be greater than 0, was %d") %
1038 raise ValueError(_("targetsize must be greater than 0, was %d") %
1007 targetsize)
1039 targetsize)
1008 self.iterempty = False
1040 self.iterempty = False
1009
1041
1010 def fillbuf(self):
1042 def fillbuf(self):
1011 """Ignore target size; read every chunk from iterator until empty."""
1043 """Ignore target size; read every chunk from iterator until empty."""
1012 if not self.iterempty:
1044 if not self.iterempty:
1013 collector = cStringIO.StringIO()
1045 collector = cStringIO.StringIO()
1014 collector.write(self.buf)
1046 collector.write(self.buf)
1015 for ch in self.in_iter:
1047 for ch in self.in_iter:
1016 collector.write(ch)
1048 collector.write(ch)
1017 self.buf = collector.getvalue()
1049 self.buf = collector.getvalue()
1018 self.iterempty = True
1050 self.iterempty = True
1019
1051
1020 def read(self, l):
1052 def read(self, l):
1021 """Read L bytes of data from the iterator of chunks of data.
1053 """Read L bytes of data from the iterator of chunks of data.
1022 Returns less than L bytes if the iterator runs dry."""
1054 Returns less than L bytes if the iterator runs dry."""
1023 if l > len(self.buf) and not self.iterempty:
1055 if l > len(self.buf) and not self.iterempty:
1024 # Clamp to a multiple of self.targetsize
1056 # Clamp to a multiple of self.targetsize
1025 targetsize = self.targetsize * ((l // self.targetsize) + 1)
1057 targetsize = self.targetsize * ((l // self.targetsize) + 1)
1026 collector = cStringIO.StringIO()
1058 collector = cStringIO.StringIO()
1027 collector.write(self.buf)
1059 collector.write(self.buf)
1028 collected = len(self.buf)
1060 collected = len(self.buf)
1029 for chunk in self.in_iter:
1061 for chunk in self.in_iter:
1030 collector.write(chunk)
1062 collector.write(chunk)
1031 collected += len(chunk)
1063 collected += len(chunk)
1032 if collected >= targetsize:
1064 if collected >= targetsize:
1033 break
1065 break
1034 if collected < targetsize:
1066 if collected < targetsize:
1035 self.iterempty = True
1067 self.iterempty = True
1036 self.buf = collector.getvalue()
1068 self.buf = collector.getvalue()
1037 s, self.buf = self.buf[:l], buffer(self.buf, l)
1069 s, self.buf = self.buf[:l], buffer(self.buf, l)
1038 return s
1070 return s
1039
1071
1040 def filechunkiter(f, size=65536, limit=None):
1072 def filechunkiter(f, size=65536, limit=None):
1041 """Create a generator that produces the data in the file size
1073 """Create a generator that produces the data in the file size
1042 (default 65536) bytes at a time, up to optional limit (default is
1074 (default 65536) bytes at a time, up to optional limit (default is
1043 to read all data). Chunks may be less than size bytes if the
1075 to read all data). Chunks may be less than size bytes if the
1044 chunk is the last chunk in the file, or the file is a socket or
1076 chunk is the last chunk in the file, or the file is a socket or
1045 some other type of file that sometimes reads less data than is
1077 some other type of file that sometimes reads less data than is
1046 requested."""
1078 requested."""
1047 assert size >= 0
1079 assert size >= 0
1048 assert limit is None or limit >= 0
1080 assert limit is None or limit >= 0
1049 while True:
1081 while True:
1050 if limit is None: nbytes = size
1082 if limit is None: nbytes = size
1051 else: nbytes = min(limit, size)
1083 else: nbytes = min(limit, size)
1052 s = nbytes and f.read(nbytes)
1084 s = nbytes and f.read(nbytes)
1053 if not s: break
1085 if not s: break
1054 if limit: limit -= len(s)
1086 if limit: limit -= len(s)
1055 yield s
1087 yield s
1056
1088
1057 def makedate():
1089 def makedate():
1058 lt = time.localtime()
1090 lt = time.localtime()
1059 if lt[8] == 1 and time.daylight:
1091 if lt[8] == 1 and time.daylight:
1060 tz = time.altzone
1092 tz = time.altzone
1061 else:
1093 else:
1062 tz = time.timezone
1094 tz = time.timezone
1063 return time.mktime(lt), tz
1095 return time.mktime(lt), tz
1064
1096
1065 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
1097 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
1066 """represent a (unixtime, offset) tuple as a localized time.
1098 """represent a (unixtime, offset) tuple as a localized time.
1067 unixtime is seconds since the epoch, and offset is the time zone's
1099 unixtime is seconds since the epoch, and offset is the time zone's
1068 number of seconds away from UTC. if timezone is false, do not
1100 number of seconds away from UTC. if timezone is false, do not
1069 append time zone to string."""
1101 append time zone to string."""
1070 t, tz = date or makedate()
1102 t, tz = date or makedate()
1071 s = time.strftime(format, time.gmtime(float(t) - tz))
1103 s = time.strftime(format, time.gmtime(float(t) - tz))
1072 if timezone:
1104 if timezone:
1073 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
1105 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
1074 return s
1106 return s
1075
1107
1076 def strdate(string, format, defaults):
1108 def strdate(string, format, defaults):
1077 """parse a localized time string and return a (unixtime, offset) tuple.
1109 """parse a localized time string and return a (unixtime, offset) tuple.
1078 if the string cannot be parsed, ValueError is raised."""
1110 if the string cannot be parsed, ValueError is raised."""
1079 def timezone(string):
1111 def timezone(string):
1080 tz = string.split()[-1]
1112 tz = string.split()[-1]
1081 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1113 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1082 tz = int(tz)
1114 tz = int(tz)
1083 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1115 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1084 return offset
1116 return offset
1085 if tz == "GMT" or tz == "UTC":
1117 if tz == "GMT" or tz == "UTC":
1086 return 0
1118 return 0
1087 return None
1119 return None
1088
1120
1089 # NOTE: unixtime = localunixtime + offset
1121 # NOTE: unixtime = localunixtime + offset
1090 offset, date = timezone(string), string
1122 offset, date = timezone(string), string
1091 if offset != None:
1123 if offset != None:
1092 date = " ".join(string.split()[:-1])
1124 date = " ".join(string.split()[:-1])
1093
1125
1094 # add missing elements from defaults
1126 # add missing elements from defaults
1095 for part in defaults:
1127 for part in defaults:
1096 found = [True for p in part if ("%"+p) in format]
1128 found = [True for p in part if ("%"+p) in format]
1097 if not found:
1129 if not found:
1098 date += "@" + defaults[part]
1130 date += "@" + defaults[part]
1099 format += "@%" + part[0]
1131 format += "@%" + part[0]
1100
1132
1101 timetuple = time.strptime(date, format)
1133 timetuple = time.strptime(date, format)
1102 localunixtime = int(calendar.timegm(timetuple))
1134 localunixtime = int(calendar.timegm(timetuple))
1103 if offset is None:
1135 if offset is None:
1104 # local timezone
1136 # local timezone
1105 unixtime = int(time.mktime(timetuple))
1137 unixtime = int(time.mktime(timetuple))
1106 offset = unixtime - localunixtime
1138 offset = unixtime - localunixtime
1107 else:
1139 else:
1108 unixtime = localunixtime + offset
1140 unixtime = localunixtime + offset
1109 return unixtime, offset
1141 return unixtime, offset
1110
1142
1111 def parsedate(string, formats=None, defaults=None):
1143 def parsedate(string, formats=None, defaults=None):
1112 """parse a localized time string and return a (unixtime, offset) tuple.
1144 """parse a localized time string and return a (unixtime, offset) tuple.
1113 The date may be a "unixtime offset" string or in one of the specified
1145 The date may be a "unixtime offset" string or in one of the specified
1114 formats."""
1146 formats."""
1115 if not string:
1147 if not string:
1116 return 0, 0
1148 return 0, 0
1117 if not formats:
1149 if not formats:
1118 formats = defaultdateformats
1150 formats = defaultdateformats
1119 string = string.strip()
1151 string = string.strip()
1120 try:
1152 try:
1121 when, offset = map(int, string.split(' '))
1153 when, offset = map(int, string.split(' '))
1122 except ValueError:
1154 except ValueError:
1123 # fill out defaults
1155 # fill out defaults
1124 if not defaults:
1156 if not defaults:
1125 defaults = {}
1157 defaults = {}
1126 now = makedate()
1158 now = makedate()
1127 for part in "d mb yY HI M S".split():
1159 for part in "d mb yY HI M S".split():
1128 if part not in defaults:
1160 if part not in defaults:
1129 if part[0] in "HMS":
1161 if part[0] in "HMS":
1130 defaults[part] = "00"
1162 defaults[part] = "00"
1131 elif part[0] in "dm":
1163 elif part[0] in "dm":
1132 defaults[part] = "1"
1164 defaults[part] = "1"
1133 else:
1165 else:
1134 defaults[part] = datestr(now, "%" + part[0], False)
1166 defaults[part] = datestr(now, "%" + part[0], False)
1135
1167
1136 for format in formats:
1168 for format in formats:
1137 try:
1169 try:
1138 when, offset = strdate(string, format, defaults)
1170 when, offset = strdate(string, format, defaults)
1139 except ValueError:
1171 except ValueError:
1140 pass
1172 pass
1141 else:
1173 else:
1142 break
1174 break
1143 else:
1175 else:
1144 raise Abort(_('invalid date: %r ') % string)
1176 raise Abort(_('invalid date: %r ') % string)
1145 # validate explicit (probably user-specified) date and
1177 # validate explicit (probably user-specified) date and
1146 # time zone offset. values must fit in signed 32 bits for
1178 # time zone offset. values must fit in signed 32 bits for
1147 # current 32-bit linux runtimes. timezones go from UTC-12
1179 # current 32-bit linux runtimes. timezones go from UTC-12
1148 # to UTC+14
1180 # to UTC+14
1149 if abs(when) > 0x7fffffff:
1181 if abs(when) > 0x7fffffff:
1150 raise Abort(_('date exceeds 32 bits: %d') % when)
1182 raise Abort(_('date exceeds 32 bits: %d') % when)
1151 if offset < -50400 or offset > 43200:
1183 if offset < -50400 or offset > 43200:
1152 raise Abort(_('impossible time zone offset: %d') % offset)
1184 raise Abort(_('impossible time zone offset: %d') % offset)
1153 return when, offset
1185 return when, offset
1154
1186
1155 def matchdate(date):
1187 def matchdate(date):
1156 """Return a function that matches a given date match specifier
1188 """Return a function that matches a given date match specifier
1157
1189
1158 Formats include:
1190 Formats include:
1159
1191
1160 '{date}' match a given date to the accuracy provided
1192 '{date}' match a given date to the accuracy provided
1161
1193
1162 '<{date}' on or before a given date
1194 '<{date}' on or before a given date
1163
1195
1164 '>{date}' on or after a given date
1196 '>{date}' on or after a given date
1165
1197
1166 """
1198 """
1167
1199
1168 def lower(date):
1200 def lower(date):
1169 return parsedate(date, extendeddateformats)[0]
1201 return parsedate(date, extendeddateformats)[0]
1170
1202
1171 def upper(date):
1203 def upper(date):
1172 d = dict(mb="12", HI="23", M="59", S="59")
1204 d = dict(mb="12", HI="23", M="59", S="59")
1173 for days in "31 30 29".split():
1205 for days in "31 30 29".split():
1174 try:
1206 try:
1175 d["d"] = days
1207 d["d"] = days
1176 return parsedate(date, extendeddateformats, d)[0]
1208 return parsedate(date, extendeddateformats, d)[0]
1177 except:
1209 except:
1178 pass
1210 pass
1179 d["d"] = "28"
1211 d["d"] = "28"
1180 return parsedate(date, extendeddateformats, d)[0]
1212 return parsedate(date, extendeddateformats, d)[0]
1181
1213
1182 if date[0] == "<":
1214 if date[0] == "<":
1183 when = upper(date[1:])
1215 when = upper(date[1:])
1184 return lambda x: x <= when
1216 return lambda x: x <= when
1185 elif date[0] == ">":
1217 elif date[0] == ">":
1186 when = lower(date[1:])
1218 when = lower(date[1:])
1187 return lambda x: x >= when
1219 return lambda x: x >= when
1188 elif date[0] == "-":
1220 elif date[0] == "-":
1189 try:
1221 try:
1190 days = int(date[1:])
1222 days = int(date[1:])
1191 except ValueError:
1223 except ValueError:
1192 raise Abort(_("invalid day spec: %s") % date[1:])
1224 raise Abort(_("invalid day spec: %s") % date[1:])
1193 when = makedate()[0] - days * 3600 * 24
1225 when = makedate()[0] - days * 3600 * 24
1194 return lambda x: x >= when
1226 return lambda x: x >= when
1195 elif " to " in date:
1227 elif " to " in date:
1196 a, b = date.split(" to ")
1228 a, b = date.split(" to ")
1197 start, stop = lower(a), upper(b)
1229 start, stop = lower(a), upper(b)
1198 return lambda x: x >= start and x <= stop
1230 return lambda x: x >= start and x <= stop
1199 else:
1231 else:
1200 start, stop = lower(date), upper(date)
1232 start, stop = lower(date), upper(date)
1201 return lambda x: x >= start and x <= stop
1233 return lambda x: x >= start and x <= stop
1202
1234
1203 def shortuser(user):
1235 def shortuser(user):
1204 """Return a short representation of a user name or email address."""
1236 """Return a short representation of a user name or email address."""
1205 f = user.find('@')
1237 f = user.find('@')
1206 if f >= 0:
1238 if f >= 0:
1207 user = user[:f]
1239 user = user[:f]
1208 f = user.find('<')
1240 f = user.find('<')
1209 if f >= 0:
1241 if f >= 0:
1210 user = user[f+1:]
1242 user = user[f+1:]
1211 f = user.find(' ')
1243 f = user.find(' ')
1212 if f >= 0:
1244 if f >= 0:
1213 user = user[:f]
1245 user = user[:f]
1214 f = user.find('.')
1246 f = user.find('.')
1215 if f >= 0:
1247 if f >= 0:
1216 user = user[:f]
1248 user = user[:f]
1217 return user
1249 return user
1218
1250
1219 def ellipsis(text, maxlength=400):
1251 def ellipsis(text, maxlength=400):
1220 """Trim string to at most maxlength (default: 400) characters."""
1252 """Trim string to at most maxlength (default: 400) characters."""
1221 if len(text) <= maxlength:
1253 if len(text) <= maxlength:
1222 return text
1254 return text
1223 else:
1255 else:
1224 return "%s..." % (text[:maxlength-3])
1256 return "%s..." % (text[:maxlength-3])
1225
1257
1226 def walkrepos(path):
1258 def walkrepos(path):
1227 '''yield every hg repository under path, recursively.'''
1259 '''yield every hg repository under path, recursively.'''
1228 def errhandler(err):
1260 def errhandler(err):
1229 if err.filename == path:
1261 if err.filename == path:
1230 raise err
1262 raise err
1231
1263
1232 for root, dirs, files in os.walk(path, onerror=errhandler):
1264 for root, dirs, files in os.walk(path, onerror=errhandler):
1233 for d in dirs:
1265 for d in dirs:
1234 if d == '.hg':
1266 if d == '.hg':
1235 yield root
1267 yield root
1236 dirs[:] = []
1268 dirs[:] = []
1237 break
1269 break
1238
1270
1239 _rcpath = None
1271 _rcpath = None
1240
1272
1241 def rcpath():
1273 def rcpath():
1242 '''return hgrc search path. if env var HGRCPATH is set, use it.
1274 '''return hgrc search path. if env var HGRCPATH is set, use it.
1243 for each item in path, if directory, use files ending in .rc,
1275 for each item in path, if directory, use files ending in .rc,
1244 else use item.
1276 else use item.
1245 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1277 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1246 if no HGRCPATH, use default os-specific path.'''
1278 if no HGRCPATH, use default os-specific path.'''
1247 global _rcpath
1279 global _rcpath
1248 if _rcpath is None:
1280 if _rcpath is None:
1249 if 'HGRCPATH' in os.environ:
1281 if 'HGRCPATH' in os.environ:
1250 _rcpath = []
1282 _rcpath = []
1251 for p in os.environ['HGRCPATH'].split(os.pathsep):
1283 for p in os.environ['HGRCPATH'].split(os.pathsep):
1252 if not p: continue
1284 if not p: continue
1253 if os.path.isdir(p):
1285 if os.path.isdir(p):
1254 for f in os.listdir(p):
1286 for f in os.listdir(p):
1255 if f.endswith('.rc'):
1287 if f.endswith('.rc'):
1256 _rcpath.append(os.path.join(p, f))
1288 _rcpath.append(os.path.join(p, f))
1257 else:
1289 else:
1258 _rcpath.append(p)
1290 _rcpath.append(p)
1259 else:
1291 else:
1260 _rcpath = os_rcpath()
1292 _rcpath = os_rcpath()
1261 return _rcpath
1293 return _rcpath
1262
1294
1263 def bytecount(nbytes):
1295 def bytecount(nbytes):
1264 '''return byte count formatted as readable string, with units'''
1296 '''return byte count formatted as readable string, with units'''
1265
1297
1266 units = (
1298 units = (
1267 (100, 1<<30, _('%.0f GB')),
1299 (100, 1<<30, _('%.0f GB')),
1268 (10, 1<<30, _('%.1f GB')),
1300 (10, 1<<30, _('%.1f GB')),
1269 (1, 1<<30, _('%.2f GB')),
1301 (1, 1<<30, _('%.2f GB')),
1270 (100, 1<<20, _('%.0f MB')),
1302 (100, 1<<20, _('%.0f MB')),
1271 (10, 1<<20, _('%.1f MB')),
1303 (10, 1<<20, _('%.1f MB')),
1272 (1, 1<<20, _('%.2f MB')),
1304 (1, 1<<20, _('%.2f MB')),
1273 (100, 1<<10, _('%.0f KB')),
1305 (100, 1<<10, _('%.0f KB')),
1274 (10, 1<<10, _('%.1f KB')),
1306 (10, 1<<10, _('%.1f KB')),
1275 (1, 1<<10, _('%.2f KB')),
1307 (1, 1<<10, _('%.2f KB')),
1276 (1, 1, _('%.0f bytes')),
1308 (1, 1, _('%.0f bytes')),
1277 )
1309 )
1278
1310
1279 for multiplier, divisor, format in units:
1311 for multiplier, divisor, format in units:
1280 if nbytes >= divisor * multiplier:
1312 if nbytes >= divisor * multiplier:
1281 return format % (nbytes / float(divisor))
1313 return format % (nbytes / float(divisor))
1282 return units[-1][2] % nbytes
1314 return units[-1][2] % nbytes
1283
1315
1284 def drop_scheme(scheme, path):
1316 def drop_scheme(scheme, path):
1285 sc = scheme + ':'
1317 sc = scheme + ':'
1286 if path.startswith(sc):
1318 if path.startswith(sc):
1287 path = path[len(sc):]
1319 path = path[len(sc):]
1288 if path.startswith('//'):
1320 if path.startswith('//'):
1289 path = path[2:]
1321 path = path[2:]
1290 return path
1322 return path
@@ -1,113 +1,113
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init test
3 hg init test
4 cd test
4 cd test
5 cat >>afile <<EOF
5 cat >>afile <<EOF
6 0
6 0
7 EOF
7 EOF
8 hg add afile
8 hg add afile
9 hg commit -m "0.0" -d "1000000 0"
9 hg commit -m "0.0" -d "1000000 0"
10 cat >>afile <<EOF
10 cat >>afile <<EOF
11 1
11 1
12 EOF
12 EOF
13 hg commit -m "0.1" -d "1000000 0"
13 hg commit -m "0.1" -d "1000000 0"
14 cat >>afile <<EOF
14 cat >>afile <<EOF
15 2
15 2
16 EOF
16 EOF
17 hg commit -m "0.2" -d "1000000 0"
17 hg commit -m "0.2" -d "1000000 0"
18 cat >>afile <<EOF
18 cat >>afile <<EOF
19 3
19 3
20 EOF
20 EOF
21 hg commit -m "0.3" -d "1000000 0"
21 hg commit -m "0.3" -d "1000000 0"
22 hg update -C 0
22 hg update -C 0
23 cat >>afile <<EOF
23 cat >>afile <<EOF
24 1
24 1
25 EOF
25 EOF
26 hg commit -m "1.1" -d "1000000 0"
26 hg commit -m "1.1" -d "1000000 0"
27 cat >>afile <<EOF
27 cat >>afile <<EOF
28 2
28 2
29 EOF
29 EOF
30 hg commit -m "1.2" -d "1000000 0"
30 hg commit -m "1.2" -d "1000000 0"
31 cat >fred <<EOF
31 cat >fred <<EOF
32 a line
32 a line
33 EOF
33 EOF
34 cat >>afile <<EOF
34 cat >>afile <<EOF
35 3
35 3
36 EOF
36 EOF
37 hg add fred
37 hg add fred
38 hg commit -m "1.3" -d "1000000 0"
38 hg commit -m "1.3" -d "1000000 0"
39 hg mv afile adifferentfile
39 hg mv afile adifferentfile
40 hg commit -m "1.3m" -d "1000000 0"
40 hg commit -m "1.3m" -d "1000000 0"
41 hg update -C 3
41 hg update -C 3
42 hg mv afile anotherfile
42 hg mv afile anotherfile
43 hg commit -m "0.3m" -d "1000000 0"
43 hg commit -m "0.3m" -d "1000000 0"
44 hg debugindex .hg/data/afile.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/00manifest.i
48 hg debugindex .hg/store/00manifest.i
49 hg verify
49 hg verify
50 cd ..
50 cd ..
51 for i in 0 1 2 3 4 5 6 7 8; do
51 for i in 0 1 2 3 4 5 6 7 8; do
52 mkdir test-"$i"
52 mkdir test-"$i"
53 hg --cwd test-"$i" init
53 hg --cwd test-"$i" init
54 hg -R test bundle -r "$i" test-"$i".hg test-"$i"
54 hg -R test bundle -r "$i" test-"$i".hg test-"$i"
55 cd test-"$i"
55 cd test-"$i"
56 hg unbundle ../test-"$i".hg
56 hg unbundle ../test-"$i".hg
57 hg verify
57 hg verify
58 hg tip -q
58 hg tip -q
59 cd ..
59 cd ..
60 done
60 done
61 cd test-8
61 cd test-8
62 hg pull ../test-7
62 hg pull ../test-7
63 hg verify
63 hg verify
64 hg rollback
64 hg rollback
65 cd ..
65 cd ..
66
66
67 echo % should fail
67 echo % should fail
68 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg test-3
68 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg test-3
69 hg -R test bundle -r tip test-bundle-branch1.hg
69 hg -R test bundle -r tip test-bundle-branch1.hg
70
70
71 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg
71 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg
72 hg -R test bundle --base 2 -r 7 test-bundle-branch2.hg
72 hg -R test bundle --base 2 -r 7 test-bundle-branch2.hg
73 hg -R test bundle --base 2 test-bundle-all.hg
73 hg -R test bundle --base 2 test-bundle-all.hg
74 hg -R test bundle --base 3 -r tip test-bundle-should-fail.hg
74 hg -R test bundle --base 3 -r tip test-bundle-should-fail.hg
75
75
76 # issue76 msg2163
76 # issue76 msg2163
77 hg -R test bundle --base 3 -r 3 -r 3 test-bundle-cset-3.hg
77 hg -R test bundle --base 3 -r 3 -r 3 test-bundle-cset-3.hg
78
78
79 hg clone test-2 test-9
79 hg clone test-2 test-9
80 cd test-9
80 cd test-9
81 echo % 2
81 echo % 2
82 hg tip -q
82 hg tip -q
83 hg unbundle ../test-bundle-should-fail.hg
83 hg unbundle ../test-bundle-should-fail.hg
84 echo % 2
84 echo % 2
85 hg tip -q
85 hg tip -q
86 hg unbundle ../test-bundle-all.hg
86 hg unbundle ../test-bundle-all.hg
87 echo % 8
87 echo % 8
88 hg tip -q
88 hg tip -q
89 hg verify
89 hg verify
90 hg rollback
90 hg rollback
91 echo % 2
91 echo % 2
92 hg tip -q
92 hg tip -q
93 hg unbundle ../test-bundle-branch1.hg
93 hg unbundle ../test-bundle-branch1.hg
94 echo % 4
94 echo % 4
95 hg tip -q
95 hg tip -q
96 hg verify
96 hg verify
97 hg rollback
97 hg rollback
98 hg unbundle ../test-bundle-branch2.hg
98 hg unbundle ../test-bundle-branch2.hg
99 echo % 6
99 echo % 6
100 hg tip -q
100 hg tip -q
101 hg verify
101 hg verify
102
102
103 cd ../test
103 cd ../test
104 hg merge 7
104 hg merge 7
105 hg ci -m merge -d "1000000 0"
105 hg ci -m merge -d "1000000 0"
106 cd ..
106 cd ..
107 hg -R test bundle --base 2 test-bundle-head.hg
107 hg -R test bundle --base 2 test-bundle-head.hg
108 hg clone test-2 test-10
108 hg clone test-2 test-10
109 cd test-10
109 cd test-10
110 hg unbundle ../test-bundle-head.hg
110 hg unbundle ../test-bundle-head.hg
111 echo % 9
111 echo % 9
112 hg tip -q
112 hg tip -q
113 hg verify
113 hg verify
@@ -1,59 +1,59
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init test
3 hg init test
4 cd test
4 cd test
5 cat >>afile <<EOF
5 cat >>afile <<EOF
6 0
6 0
7 EOF
7 EOF
8 hg add afile
8 hg add afile
9 hg commit -m "0.0"
9 hg commit -m "0.0"
10 cat >>afile <<EOF
10 cat >>afile <<EOF
11 1
11 1
12 EOF
12 EOF
13 hg commit -m "0.1"
13 hg commit -m "0.1"
14 cat >>afile <<EOF
14 cat >>afile <<EOF
15 2
15 2
16 EOF
16 EOF
17 hg commit -m "0.2"
17 hg commit -m "0.2"
18 cat >>afile <<EOF
18 cat >>afile <<EOF
19 3
19 3
20 EOF
20 EOF
21 hg commit -m "0.3"
21 hg commit -m "0.3"
22 hg update -C 0
22 hg update -C 0
23 cat >>afile <<EOF
23 cat >>afile <<EOF
24 1
24 1
25 EOF
25 EOF
26 hg commit -m "1.1"
26 hg commit -m "1.1"
27 cat >>afile <<EOF
27 cat >>afile <<EOF
28 2
28 2
29 EOF
29 EOF
30 hg commit -m "1.2"
30 hg commit -m "1.2"
31 cat >fred <<EOF
31 cat >fred <<EOF
32 a line
32 a line
33 EOF
33 EOF
34 cat >>afile <<EOF
34 cat >>afile <<EOF
35 3
35 3
36 EOF
36 EOF
37 hg add fred
37 hg add fred
38 hg commit -m "1.3"
38 hg commit -m "1.3"
39 hg mv afile adifferentfile
39 hg mv afile adifferentfile
40 hg commit -m "1.3m"
40 hg commit -m "1.3m"
41 hg update -C 3
41 hg update -C 3
42 hg mv afile anotherfile
42 hg mv afile anotherfile
43 hg commit -m "0.3m"
43 hg commit -m "0.3m"
44 hg debugindex .hg/data/afile.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/00manifest.i
48 hg debugindex .hg/store/00manifest.i
49 hg verify
49 hg verify
50 cd ..
50 cd ..
51 for i in 0 1 2 3 4 5 6 7 8; do
51 for i in 0 1 2 3 4 5 6 7 8; do
52 hg clone -r "$i" test test-"$i"
52 hg clone -r "$i" test test-"$i"
53 cd test-"$i"
53 cd test-"$i"
54 hg verify
54 hg verify
55 cd ..
55 cd ..
56 done
56 done
57 cd test-8
57 cd test-8
58 hg pull ../test-7
58 hg pull ../test-7
59 hg verify
59 hg verify
@@ -1,14 +1,14
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init dir
3 hg init dir
4 cd dir
4 cd dir
5 echo bleh > bar
5 echo bleh > bar
6 hg add bar
6 hg add bar
7 hg ci -m 'add bar'
7 hg ci -m 'add bar'
8
8
9 hg cp bar foo
9 hg cp bar foo
10 echo >> bar
10 echo >> bar
11 hg ci -m 'cp bar foo; change bar'
11 hg ci -m 'cp bar foo; change bar'
12
12
13 hg debugrename foo
13 hg debugrename foo
14 hg debugindex .hg/data/bar.i
14 hg debugindex .hg/store/data/bar.i
@@ -1,30 +1,30
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4 echo a > a
4 echo a > a
5 hg add a
5 hg add a
6 hg commit -m "1" -d "1000000 0"
6 hg commit -m "1" -d "1000000 0"
7 hg status
7 hg status
8 hg copy a b
8 hg copy a b
9 hg status
9 hg status
10 hg --debug commit -m "2" -d "1000000 0"
10 hg --debug commit -m "2" -d "1000000 0"
11 echo "we should see two history entries"
11 echo "we should see two history entries"
12 hg history -v
12 hg history -v
13 echo "we should see one log entry for a"
13 echo "we should see one log entry for a"
14 hg log a
14 hg log a
15 echo "this should show a revision linked to changeset 0"
15 echo "this should show a revision linked to changeset 0"
16 hg debugindex .hg/data/a.i
16 hg debugindex .hg/store/data/a.i
17 echo "we should see one log entry for b"
17 echo "we should see one log entry for b"
18 hg log b
18 hg log b
19 echo "this should show a revision linked to changeset 1"
19 echo "this should show a revision linked to changeset 1"
20 hg debugindex .hg/data/b.i
20 hg debugindex .hg/store/data/b.i
21
21
22 echo "this should show the rename information in the metadata"
22 echo "this should show the rename information in the metadata"
23 hg debugdata .hg/data/b.d 0 | head -3 | tail -2
23 hg debugdata .hg/store/data/b.d 0 | head -3 | tail -2
24
24
25 $TESTDIR/md5sum.py .hg/data/b.i
25 $TESTDIR/md5sum.py .hg/store/data/b.i
26 hg cat b > bsum
26 hg cat b > bsum
27 $TESTDIR/md5sum.py bsum
27 $TESTDIR/md5sum.py bsum
28 hg cat a > asum
28 hg cat a > asum
29 $TESTDIR/md5sum.py asum
29 $TESTDIR/md5sum.py asum
30 hg verify
30 hg verify
@@ -1,51 +1,51
1 A b
1 A b
2 b
2 b
3 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
3 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
4 we should see two history entries
4 we should see two history entries
5 changeset: 1:386a3cc01532
5 changeset: 1:386a3cc01532
6 tag: tip
6 tag: tip
7 user: test
7 user: test
8 date: Mon Jan 12 13:46:40 1970 +0000
8 date: Mon Jan 12 13:46:40 1970 +0000
9 files: b
9 files: b
10 description:
10 description:
11 2
11 2
12
12
13
13
14 changeset: 0:33aaa84a386b
14 changeset: 0:33aaa84a386b
15 user: test
15 user: test
16 date: Mon Jan 12 13:46:40 1970 +0000
16 date: Mon Jan 12 13:46:40 1970 +0000
17 files: a
17 files: a
18 description:
18 description:
19 1
19 1
20
20
21
21
22 we should see one log entry for a
22 we should see one log entry for a
23 changeset: 0:33aaa84a386b
23 changeset: 0:33aaa84a386b
24 user: test
24 user: test
25 date: Mon Jan 12 13:46:40 1970 +0000
25 date: Mon Jan 12 13:46:40 1970 +0000
26 summary: 1
26 summary: 1
27
27
28 this should show a revision linked to changeset 0
28 this should show a revision linked to changeset 0
29 rev offset length base linkrev nodeid p1 p2
29 rev offset length base linkrev nodeid p1 p2
30 0 0 3 0 0 b789fdd96dc2 000000000000 000000000000
30 0 0 3 0 0 b789fdd96dc2 000000000000 000000000000
31 we should see one log entry for b
31 we should see one log entry for b
32 changeset: 1:386a3cc01532
32 changeset: 1:386a3cc01532
33 tag: tip
33 tag: tip
34 user: test
34 user: test
35 date: Mon Jan 12 13:46:40 1970 +0000
35 date: Mon Jan 12 13:46:40 1970 +0000
36 summary: 2
36 summary: 2
37
37
38 this should show a revision linked to changeset 1
38 this should show a revision linked to changeset 1
39 rev offset length base linkrev nodeid p1 p2
39 rev offset length base linkrev nodeid p1 p2
40 0 0 65 0 1 9a263dd772e0 000000000000 000000000000
40 0 0 65 0 1 9a263dd772e0 000000000000 000000000000
41 this should show the rename information in the metadata
41 this should show the rename information in the metadata
42 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
42 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
43 copy: a
43 copy: a
44 ed156f22f0a6fde642de0b5eba0cbbb2 .hg/data/b.i
44 ed156f22f0a6fde642de0b5eba0cbbb2 .hg/store/data/b.i
45 60b725f10c9c85c70d97880dfe8191b3 bsum
45 60b725f10c9c85c70d97880dfe8191b3 bsum
46 60b725f10c9c85c70d97880dfe8191b3 asum
46 60b725f10c9c85c70d97880dfe8191b3 asum
47 checking changesets
47 checking changesets
48 checking manifests
48 checking manifests
49 crosschecking files in changesets and manifests
49 crosschecking files in changesets and manifests
50 checking files
50 checking files
51 2 files, 2 changesets, 2 total revisions
51 2 files, 2 changesets, 2 total revisions
@@ -1,41 +1,41
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4 echo foo > foo
4 echo foo > foo
5 hg add foo
5 hg add foo
6 hg commit -m1 -d"0 0"
6 hg commit -m1 -d"0 0"
7
7
8 echo "# should show copy"
8 echo "# should show copy"
9 hg copy foo bar
9 hg copy foo bar
10 hg debugstate|grep '^copy'
10 hg debugstate|grep '^copy'
11
11
12 echo "# shouldn't show copy"
12 echo "# shouldn't show copy"
13 hg commit -m2 -d"0 0"
13 hg commit -m2 -d"0 0"
14 hg debugstate|grep '^copy'
14 hg debugstate|grep '^copy'
15
15
16 echo "# should match"
16 echo "# should match"
17 hg debugindex .hg/data/foo.i
17 hg debugindex .hg/store/data/foo.i
18 hg debugrename bar
18 hg debugrename bar
19
19
20 echo bleah > foo
20 echo bleah > foo
21 echo quux > bar
21 echo quux > bar
22 hg commit -m3 -d"0 0"
22 hg commit -m3 -d"0 0"
23
23
24 echo "# should not be renamed"
24 echo "# should not be renamed"
25 hg debugrename bar
25 hg debugrename bar
26
26
27 hg copy -f foo bar
27 hg copy -f foo bar
28 echo "# should show copy"
28 echo "# should show copy"
29 hg debugstate|grep '^copy'
29 hg debugstate|grep '^copy'
30 hg commit -m3 -d"0 0"
30 hg commit -m3 -d"0 0"
31
31
32 echo "# should show no parents for tip"
32 echo "# should show no parents for tip"
33 hg debugindex .hg/data/bar.i
33 hg debugindex .hg/store/data/bar.i
34 echo "# should match"
34 echo "# should match"
35 hg debugindex .hg/data/foo.i
35 hg debugindex .hg/store/data/foo.i
36 hg debugrename bar
36 hg debugrename bar
37
37
38 echo "# should show no copies"
38 echo "# should show no copies"
39 hg debugstate|grep '^copy'
39 hg debugstate|grep '^copy'
40
40
41 exit 0
41 exit 0
@@ -1,49 +1,49
1 #!/bin/sh
1 #!/bin/sh
2 #
2 #
3 # A B
3 # A B
4 #
4 #
5 # 3 4 3
5 # 3 4 3
6 # |\/| |\
6 # |\/| |\
7 # |/\| | \
7 # |/\| | \
8 # 1 2 1 2
8 # 1 2 1 2
9 # \ / \ /
9 # \ / \ /
10 # 0 0
10 # 0 0
11 #
11 #
12 # if the result of the merge of 1 and 2
12 # if the result of the merge of 1 and 2
13 # is the same in 3 and 4, no new manifest
13 # is the same in 3 and 4, no new manifest
14 # will be created and the manifest group
14 # will be created and the manifest group
15 # will be empty during the pull
15 # will be empty during the pull
16 #
16 #
17 # (plus we test a failure where outgoing
17 # (plus we test a failure where outgoing
18 # wrongly reported the number of csets)
18 # wrongly reported the number of csets)
19 #
19 #
20
20
21 hg init a
21 hg init a
22 cd a
22 cd a
23 touch init
23 touch init
24 hg ci -A -m 0 -d "1000000 0"
24 hg ci -A -m 0 -d "1000000 0"
25 touch x y
25 touch x y
26 hg ci -A -m 1 -d "1000000 0"
26 hg ci -A -m 1 -d "1000000 0"
27 hg update 0
27 hg update 0
28 touch x y
28 touch x y
29 hg ci -A -m 2 -d "1000000 0"
29 hg ci -A -m 2 -d "1000000 0"
30 hg merge 1
30 hg merge 1
31 hg ci -A -m m1 -d "1000000 0"
31 hg ci -A -m m1 -d "1000000 0"
32 #hg log
32 #hg log
33 #hg debugindex .hg/00manifest.i
33 #hg debugindex .hg/store/00manifest.i
34 hg update -C 1
34 hg update -C 1
35 hg merge 2
35 hg merge 2
36 hg ci -A -m m2 -d "1000000 0"
36 hg ci -A -m m2 -d "1000000 0"
37 #hg log
37 #hg log
38 #hg debugindex .hg/00manifest.i
38 #hg debugindex .hg/store/00manifest.i
39
39
40 cd ..
40 cd ..
41 hg clone -r 3 a b
41 hg clone -r 3 a b
42 hg clone -r 4 a c
42 hg clone -r 4 a c
43 hg -R a outgoing b
43 hg -R a outgoing b
44 hg -R a outgoing c
44 hg -R a outgoing c
45 hg -R b outgoing c
45 hg -R b outgoing c
46 hg -R c outgoing b
46 hg -R c outgoing b
47
47
48 hg -R b pull a
48 hg -R b pull a
49 hg -R c pull a
49 hg -R c pull a
@@ -1,34 +1,34
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4
4
5 cat > .hg/hgrc <<EOF
5 cat > .hg/hgrc <<EOF
6 [encode]
6 [encode]
7 *.gz = gunzip
7 *.gz = gunzip
8
8
9 [decode]
9 [decode]
10 *.gz = gzip
10 *.gz = gzip
11
11
12 EOF
12 EOF
13
13
14 echo "this is a test" | gzip > a.gz
14 echo "this is a test" | gzip > a.gz
15 hg add a.gz
15 hg add a.gz
16 hg ci -m "test" -d "1000000 0"
16 hg ci -m "test" -d "1000000 0"
17 echo %% no changes
17 echo %% no changes
18 hg status
18 hg status
19 touch a.gz
19 touch a.gz
20
20
21 echo %% no changes
21 echo %% no changes
22 hg status
22 hg status
23
23
24 echo %% uncompressed contents in repo
24 echo %% uncompressed contents in repo
25 hg debugdata .hg/data/a.gz.d 0
25 hg debugdata .hg/store/data/a.gz.d 0
26
26
27 echo %% uncompress our working dir copy
27 echo %% uncompress our working dir copy
28 gunzip < a.gz
28 gunzip < a.gz
29
29
30 rm a.gz
30 rm a.gz
31 hg co
31 hg co
32
32
33 echo %% uncompress our new working dir copy
33 echo %% uncompress our new working dir copy
34 gunzip < a.gz
34 gunzip < a.gz
@@ -1,46 +1,46
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4
4
5 echo foo > a
5 echo foo > a
6 echo foo > b
6 echo foo > b
7 hg add a b
7 hg add a b
8
8
9 hg ci -m "test" -d "1000000 0"
9 hg ci -m "test" -d "1000000 0"
10
10
11 echo blah > a
11 echo blah > a
12
12
13 hg ci -m "branch a" -d "1000000 0"
13 hg ci -m "branch a" -d "1000000 0"
14
14
15 hg co 0
15 hg co 0
16
16
17 echo blah > b
17 echo blah > b
18
18
19 hg ci -m "branch b" -d "1000000 0"
19 hg ci -m "branch b" -d "1000000 0"
20 HGMERGE=true hg merge 1
20 HGMERGE=true hg merge 1
21
21
22 hg ci -m "merge b/a -> blah" -d "1000000 0"
22 hg ci -m "merge b/a -> blah" -d "1000000 0"
23
23
24 hg co 1
24 hg co 1
25 HGMERGE=true hg merge 2
25 HGMERGE=true hg merge 2
26 hg ci -m "merge a/b -> blah" -d "1000000 0"
26 hg ci -m "merge a/b -> blah" -d "1000000 0"
27
27
28 hg log
28 hg log
29 hg debugindex .hg/00changelog.i
29 hg debugindex .hg/store/00changelog.i
30
30
31 echo
31 echo
32
32
33 echo 1
33 echo 1
34 hg manifest --debug 1
34 hg manifest --debug 1
35 echo 2
35 echo 2
36 hg manifest --debug 2
36 hg manifest --debug 2
37 echo 3
37 echo 3
38 hg manifest --debug 3
38 hg manifest --debug 3
39 echo 4
39 echo 4
40 hg manifest --debug 4
40 hg manifest --debug 4
41
41
42 echo
42 echo
43
43
44 hg debugindex .hg/data/a.i
44 hg debugindex .hg/store/data/a.i
45
45
46 hg verify
46 hg verify
@@ -1,79 +1,79
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # This test makes sure that we don't mark a file as merged with its ancestor
3 # This test makes sure that we don't mark a file as merged with its ancestor
4 # when we do a merge.
4 # when we do a merge.
5
5
6 cat <<'EOF' > merge
6 cat <<'EOF' > merge
7 #!/bin/sh
7 #!/bin/sh
8 echo merging for `basename $1`
8 echo merging for `basename $1`
9 EOF
9 EOF
10 chmod +x merge
10 chmod +x merge
11
11
12 echo creating base
12 echo creating base
13 hg init a
13 hg init a
14 cd a
14 cd a
15 echo 1 > foo
15 echo 1 > foo
16 echo 1 > bar
16 echo 1 > bar
17 echo 1 > baz
17 echo 1 > baz
18 echo 1 > quux
18 echo 1 > quux
19 hg add foo bar baz quux
19 hg add foo bar baz quux
20 hg commit -m "base" -d "1000000 0"
20 hg commit -m "base" -d "1000000 0"
21
21
22 cd ..
22 cd ..
23 hg clone a b
23 hg clone a b
24
24
25 echo creating branch a
25 echo creating branch a
26 cd a
26 cd a
27 echo 2a > foo
27 echo 2a > foo
28 echo 2a > bar
28 echo 2a > bar
29 hg commit -m "branch a" -d "1000000 0"
29 hg commit -m "branch a" -d "1000000 0"
30
30
31 echo creating branch b
31 echo creating branch b
32
32
33 cd ..
33 cd ..
34 cd b
34 cd b
35 echo 2b > foo
35 echo 2b > foo
36 echo 2b > baz
36 echo 2b > baz
37 hg commit -m "branch b" -d "1000000 0"
37 hg commit -m "branch b" -d "1000000 0"
38
38
39 echo "we shouldn't have anything but n state here"
39 echo "we shouldn't have anything but n state here"
40 hg debugstate | cut -b 1-16,35-
40 hg debugstate | cut -b 1-16,35-
41
41
42 echo merging
42 echo merging
43 hg pull ../a
43 hg pull ../a
44 env HGMERGE=../merge hg merge -v
44 env HGMERGE=../merge hg merge -v
45
45
46 echo 2m > foo
46 echo 2m > foo
47 echo 2b > baz
47 echo 2b > baz
48 echo new > quux
48 echo new > quux
49
49
50 echo "we shouldn't have anything but foo in merge state here"
50 echo "we shouldn't have anything but foo in merge state here"
51 hg debugstate | cut -b 1-16,35- | grep "^m"
51 hg debugstate | cut -b 1-16,35- | grep "^m"
52
52
53 hg ci -m "merge" -d "1000000 0"
53 hg ci -m "merge" -d "1000000 0"
54
54
55 echo "main: we should have a merge here"
55 echo "main: we should have a merge here"
56 hg debugindex .hg/00changelog.i
56 hg debugindex .hg/store/00changelog.i
57
57
58 echo "log should show foo and quux changed"
58 echo "log should show foo and quux changed"
59 hg log -v -r tip
59 hg log -v -r tip
60
60
61 echo "foo: we should have a merge here"
61 echo "foo: we should have a merge here"
62 hg debugindex .hg/data/foo.i
62 hg debugindex .hg/store/data/foo.i
63
63
64 echo "bar: we shouldn't have a merge here"
64 echo "bar: we shouldn't have a merge here"
65 hg debugindex .hg/data/bar.i
65 hg debugindex .hg/store/data/bar.i
66
66
67 echo "baz: we shouldn't have a merge here"
67 echo "baz: we shouldn't have a merge here"
68 hg debugindex .hg/data/baz.i
68 hg debugindex .hg/store/data/baz.i
69
69
70 echo "quux: we shouldn't have a merge here"
70 echo "quux: we shouldn't have a merge here"
71 hg debugindex .hg/data/quux.i
71 hg debugindex .hg/store/data/quux.i
72
72
73 echo "manifest entries should match tips of all files"
73 echo "manifest entries should match tips of all files"
74 hg manifest --debug
74 hg manifest --debug
75
75
76 echo "everything should be clean now"
76 echo "everything should be clean now"
77 hg status
77 hg status
78
78
79 hg verify
79 hg verify
@@ -1,48 +1,48
1 #!/bin/sh -e
1 #!/bin/sh -e
2
2
3 umask 027
3 umask 027
4 mkdir test1
4 mkdir test1
5 cd test1
5 cd test1
6
6
7 hg init
7 hg init
8 touch a b
8 touch a b
9 hg add a b
9 hg add a b
10 hg ci -m "added a b" -d "1000000 0"
10 hg ci -m "added a b" -d "1000000 0"
11
11
12 cd ..
12 cd ..
13 hg clone test1 test3
13 hg clone test1 test3
14 mkdir test2
14 mkdir test2
15 cd test2
15 cd test2
16
16
17 hg init
17 hg init
18 hg pull ../test1
18 hg pull ../test1
19 hg co
19 hg co
20 chmod +x a
20 chmod +x a
21 hg ci -m "chmod +x a" -d "1000000 0"
21 hg ci -m "chmod +x a" -d "1000000 0"
22
22
23 cd ../test1
23 cd ../test1
24 echo 123 >>a
24 echo 123 >>a
25 hg ci -m "a updated" -d "1000000 0"
25 hg ci -m "a updated" -d "1000000 0"
26
26
27 hg pull ../test2
27 hg pull ../test2
28 hg heads
28 hg heads
29 hg history
29 hg history
30
30
31 hg -v merge
31 hg -v merge
32
32
33 cd ../test3
33 cd ../test3
34 echo 123 >>b
34 echo 123 >>b
35 hg ci -m "b updated" -d "1000000 0"
35 hg ci -m "b updated" -d "1000000 0"
36
36
37 hg pull ../test2
37 hg pull ../test2
38 hg heads
38 hg heads
39 hg history
39 hg history
40
40
41 hg -v merge
41 hg -v merge
42
42
43 ls -l ../test[123]/a > foo
43 ls -l ../test[123]/a > foo
44 cut -b 1-10 < foo
44 cut -b 1-10 < foo
45
45
46 hg debugindex .hg/data/a.i
46 hg debugindex .hg/store/data/a.i
47 hg debugindex ../test2/.hg/data/a.i
47 hg debugindex ../test2/.hg/store/data/a.i
48 hg debugindex ../test1/.hg/data/a.i
48 hg debugindex ../test1/.hg/store/data/a.i
@@ -1,78 +1,78
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init remote
3 hg init remote
4 cd remote
4 cd remote
5 echo "# creating 'remote'"
5 echo "# creating 'remote'"
6 cat >>afile <<EOF
6 cat >>afile <<EOF
7 0
7 0
8 EOF
8 EOF
9 hg add afile
9 hg add afile
10 hg commit -m "0.0"
10 hg commit -m "0.0"
11 cat >>afile <<EOF
11 cat >>afile <<EOF
12 1
12 1
13 EOF
13 EOF
14 hg commit -m "0.1"
14 hg commit -m "0.1"
15 cat >>afile <<EOF
15 cat >>afile <<EOF
16 2
16 2
17 EOF
17 EOF
18 hg commit -m "0.2"
18 hg commit -m "0.2"
19 cat >>afile <<EOF
19 cat >>afile <<EOF
20 3
20 3
21 EOF
21 EOF
22 hg commit -m "0.3"
22 hg commit -m "0.3"
23 hg update -C 0
23 hg update -C 0
24 cat >>afile <<EOF
24 cat >>afile <<EOF
25 1
25 1
26 EOF
26 EOF
27 hg commit -m "1.1"
27 hg commit -m "1.1"
28 cat >>afile <<EOF
28 cat >>afile <<EOF
29 2
29 2
30 EOF
30 EOF
31 hg commit -m "1.2"
31 hg commit -m "1.2"
32 cat >fred <<EOF
32 cat >fred <<EOF
33 a line
33 a line
34 EOF
34 EOF
35 cat >>afile <<EOF
35 cat >>afile <<EOF
36 3
36 3
37 EOF
37 EOF
38 hg add fred
38 hg add fred
39 hg commit -m "1.3"
39 hg commit -m "1.3"
40 hg mv afile adifferentfile
40 hg mv afile adifferentfile
41 hg commit -m "1.3m"
41 hg commit -m "1.3m"
42 hg update -C 3
42 hg update -C 3
43 hg mv afile anotherfile
43 hg mv afile anotherfile
44 hg commit -m "0.3m"
44 hg commit -m "0.3m"
45 hg debugindex .hg/data/afile.i
45 hg debugindex .hg/store/data/afile.i
46 hg debugindex .hg/data/adifferentfile.i
46 hg debugindex .hg/store/data/adifferentfile.i
47 hg debugindex .hg/data/anotherfile.i
47 hg debugindex .hg/store/data/anotherfile.i
48 hg debugindex .hg/data/fred.i
48 hg debugindex .hg/store/data/fred.i
49 hg debugindex .hg/00manifest.i
49 hg debugindex .hg/store/00manifest.i
50 hg verify
50 hg verify
51 echo "# Starting server"
51 echo "# Starting server"
52 hg serve -p 20061 -d --pid-file=../hg1.pid
52 hg serve -p 20061 -d --pid-file=../hg1.pid
53 cd ..
53 cd ..
54 cat hg1.pid >> $DAEMON_PIDS
54 cat hg1.pid >> $DAEMON_PIDS
55
55
56 echo "# clone remote via stream"
56 echo "# clone remote via stream"
57 for i in 0 1 2 3 4 5 6 7 8; do
57 for i in 0 1 2 3 4 5 6 7 8; do
58 hg clone -r "$i" http://localhost:20061/ test-"$i" 2>&1
58 hg clone -r "$i" http://localhost:20061/ test-"$i" 2>&1
59 if cd test-"$i"; then
59 if cd test-"$i"; then
60 hg verify
60 hg verify
61 cd ..
61 cd ..
62 fi
62 fi
63 done
63 done
64 cd test-8
64 cd test-8
65 hg pull ../test-7
65 hg pull ../test-7
66 hg verify
66 hg verify
67 cd ..
67 cd ..
68 cd test-1
68 cd test-1
69 hg pull -r 4 http://localhost:20061/ 2>&1
69 hg pull -r 4 http://localhost:20061/ 2>&1
70 hg verify
70 hg verify
71 hg pull http://localhost:20061/ 2>&1
71 hg pull http://localhost:20061/ 2>&1
72 cd ..
72 cd ..
73 cd test-2
73 cd test-2
74 hg pull -r 5 http://localhost:20061/ 2>&1
74 hg pull -r 5 http://localhost:20061/ 2>&1
75 hg verify
75 hg verify
76 hg pull http://localhost:20061/ 2>&1
76 hg pull http://localhost:20061/ 2>&1
77 hg verify
77 hg verify
78 cd ..
78 cd ..
@@ -1,16 +1,16
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4 mkfifo p
4 mkfifo p
5
5
6 hg serve --stdio < p &
6 hg serve --stdio < p &
7 P=$!
7 P=$!
8 (echo lock; echo addchangegroup; sleep 5) > p &
8 (echo lock; echo addchangegroup; sleep 5) > p &
9 Q=$!
9 Q=$!
10 sleep 3
10 sleep 3
11 kill -HUP $P
11 kill -HUP $P
12 wait
12 wait
13 ls .hg
13 ls -R .hg
14
14
15
15
16
16
@@ -1,8 +1,14
1 0
1 0
2 0
2 0
3 adding changesets
3 adding changesets
4 killed!
4 killed!
5 transaction abort!
5 transaction abort!
6 rollback completed
6 rollback completed
7 .hg:
7 00changelog.i
8 00changelog.i
8 journal.dirstate
9 journal.dirstate
10 requires
11 store
12
13 .hg/store:
14 00changelog.i
@@ -1,11 +1,11
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init a
3 hg init a
4 echo a > a/a
4 echo a > a/a
5 hg --cwd a ci -A -m a
5 hg --cwd a ci -A -m a
6 hg clone a b
6 hg clone a b
7 echo b > b/b
7 echo b > b/b
8 hg --cwd b ci -A -m b
8 hg --cwd b ci -A -m b
9 chmod 100 a/.hg
9 chmod 100 a/.hg/store
10 hg --cwd b push ../a
10 hg --cwd b push ../a
11 chmod 700 a/.hg
11 chmod 700 a/.hg/store
@@ -1,66 +1,66
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # initial
3 # initial
4 hg init test-a
4 hg init test-a
5 cd test-a
5 cd test-a
6 cat >test.txt <<"EOF"
6 cat >test.txt <<"EOF"
7 1
7 1
8 2
8 2
9 3
9 3
10 EOF
10 EOF
11 hg add test.txt
11 hg add test.txt
12 hg commit -m "Initial" -d "1000000 0"
12 hg commit -m "Initial" -d "1000000 0"
13
13
14 # clone
14 # clone
15 cd ..
15 cd ..
16 hg clone test-a test-b
16 hg clone test-a test-b
17
17
18 # change test-a
18 # change test-a
19 cd test-a
19 cd test-a
20 cat >test.txt <<"EOF"
20 cat >test.txt <<"EOF"
21 one
21 one
22 two
22 two
23 three
23 three
24 EOF
24 EOF
25 hg commit -m "Numbers as words" -d "1000000 0"
25 hg commit -m "Numbers as words" -d "1000000 0"
26
26
27 # change test-b
27 # change test-b
28 cd ../test-b
28 cd ../test-b
29 cat >test.txt <<"EOF"
29 cat >test.txt <<"EOF"
30 1
30 1
31 2.5
31 2.5
32 3
32 3
33 EOF
33 EOF
34 hg commit -m "2 -> 2.5" -d "1000000 0"
34 hg commit -m "2 -> 2.5" -d "1000000 0"
35
35
36 # now pull and merge from test-a
36 # now pull and merge from test-a
37 hg pull ../test-a
37 hg pull ../test-a
38 HGMERGE=merge hg merge
38 HGMERGE=merge hg merge
39 # resolve conflict
39 # resolve conflict
40 cat >test.txt <<"EOF"
40 cat >test.txt <<"EOF"
41 one
41 one
42 two-point-five
42 two-point-five
43 three
43 three
44 EOF
44 EOF
45 rm -f *.orig
45 rm -f *.orig
46 hg commit -m "Merge 1" -d "1000000 0"
46 hg commit -m "Merge 1" -d "1000000 0"
47
47
48 # change test-a again
48 # change test-a again
49 cd ../test-a
49 cd ../test-a
50 cat >test.txt <<"EOF"
50 cat >test.txt <<"EOF"
51 one
51 one
52 two-point-one
52 two-point-one
53 three
53 three
54 EOF
54 EOF
55 hg commit -m "two -> two-point-one" -d "1000000 0"
55 hg commit -m "two -> two-point-one" -d "1000000 0"
56
56
57 # pull and merge from test-a again
57 # pull and merge from test-a again
58 cd ../test-b
58 cd ../test-b
59 hg pull ../test-a
59 hg pull ../test-a
60 HGMERGE=merge hg merge --debug
60 HGMERGE=merge hg merge --debug
61
61
62 cat test.txt | sed "s% .*%%"
62 cat test.txt | sed "s% .*%%"
63
63
64 hg debugindex .hg/data/test.txt.i
64 hg debugindex .hg/store/data/test.txt.i
65
65
66 hg log
66 hg log
@@ -1,52 +1,52
1 #!/bin/sh
1 #!/bin/sh
2 #
2 #
3 # revlog.parseindex must be able to parse the index file even if
3 # revlog.parseindex must be able to parse the index file even if
4 # an index entry is split between two 64k blocks. The ideal test
4 # an index entry is split between two 64k blocks. The ideal test
5 # would be to create an index file with inline data where
5 # would be to create an index file with inline data where
6 # 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
6 # 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
7 # the size of an index entry) and with an index entry starting right
7 # the size of an index entry) and with an index entry starting right
8 # before the 64k block boundary, and try to read it.
8 # before the 64k block boundary, and try to read it.
9 #
9 #
10 # We approximate that by reducing the read buffer to 1 byte.
10 # We approximate that by reducing the read buffer to 1 byte.
11 #
11 #
12
12
13 hg init a
13 hg init a
14 cd a
14 cd a
15 echo abc > foo
15 echo abc > foo
16 hg add foo
16 hg add foo
17 hg commit -m 'add foo' -d '1000000 0'
17 hg commit -m 'add foo' -d '1000000 0'
18
18
19 echo >> foo
19 echo >> foo
20 hg commit -m 'change foo' -d '1000001 0'
20 hg commit -m 'change foo' -d '1000001 0'
21 hg log -r 0:
21 hg log -r 0:
22
22
23 cat >> test.py << EOF
23 cat >> test.py << EOF
24 from mercurial import changelog, util
24 from mercurial import changelog, util
25 from mercurial.node import *
25 from mercurial.node import *
26
26
27 class singlebyteread(object):
27 class singlebyteread(object):
28 def __init__(self, real):
28 def __init__(self, real):
29 self.real = real
29 self.real = real
30
30
31 def read(self, size=-1):
31 def read(self, size=-1):
32 if size == 65536:
32 if size == 65536:
33 size = 1
33 size = 1
34 return self.real.read(size)
34 return self.real.read(size)
35
35
36 def __getattr__(self, key):
36 def __getattr__(self, key):
37 return getattr(self.real, key)
37 return getattr(self.real, key)
38
38
39 def opener(*args):
39 def opener(*args):
40 o = util.opener(*args)
40 o = util.opener(*args)
41 def wrapper(*a):
41 def wrapper(*a):
42 f = o(*a)
42 f = o(*a)
43 return singlebyteread(f)
43 return singlebyteread(f)
44 return wrapper
44 return wrapper
45
45
46 cl = changelog.changelog(opener('.hg'))
46 cl = changelog.changelog(opener('.hg/store'))
47 print cl.count(), 'revisions:'
47 print cl.count(), 'revisions:'
48 for r in xrange(cl.count()):
48 for r in xrange(cl.count()):
49 print short(cl.node(r))
49 print short(cl.node(r))
50 EOF
50 EOF
51
51
52 python test.py
52 python test.py
@@ -1,15 +1,15
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4 echo foo > a
4 echo foo > a
5 hg add a
5 hg add a
6 hg commit -m "1" -d "1000000 0"
6 hg commit -m "1" -d "1000000 0"
7 hg verify
7 hg verify
8 chmod -r .hg/data/a.i
8 chmod -r .hg/store/data/a.i
9 hg verify 2>/dev/null || echo verify failed
9 hg verify 2>/dev/null || echo verify failed
10 chmod +r .hg/data/a.i
10 chmod +r .hg/store/data/a.i
11 hg verify 2>/dev/null || echo verify failed
11 hg verify 2>/dev/null || echo verify failed
12 chmod -w .hg/data/a.i
12 chmod -w .hg/store/data/a.i
13 echo barber > a
13 echo barber > a
14 hg commit -m "2" -d "1000000 0" 2>/dev/null || echo commit failed
14 hg commit -m "2" -d "1000000 0" 2>/dev/null || echo commit failed
15
15
@@ -1,19 +1,19
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir a
3 mkdir a
4 cd a
4 cd a
5 hg init
5 hg init
6 echo foo > b
6 echo foo > b
7 hg add b
7 hg add b
8 hg ci -m "b" -d "1000000 0"
8 hg ci -m "b" -d "1000000 0"
9
9
10 chmod -w .hg
10 chmod -w .hg/store
11
11
12 cd ..
12 cd ..
13
13
14 hg clone a b
14 hg clone a b
15
15
16 chmod +w a/.hg # let test clean up
16 chmod +w a/.hg/store # let test clean up
17
17
18 cd b
18 cd b
19 hg verify
19 hg verify
@@ -1,61 +1,61
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init test
3 hg init test
4 cd test
4 cd test
5 cat >>afile <<EOF
5 cat >>afile <<EOF
6 0
6 0
7 EOF
7 EOF
8 hg add afile
8 hg add afile
9 hg commit -m "0.0"
9 hg commit -m "0.0"
10 cat >>afile <<EOF
10 cat >>afile <<EOF
11 1
11 1
12 EOF
12 EOF
13 hg commit -m "0.1"
13 hg commit -m "0.1"
14 cat >>afile <<EOF
14 cat >>afile <<EOF
15 2
15 2
16 EOF
16 EOF
17 hg commit -m "0.2"
17 hg commit -m "0.2"
18 cat >>afile <<EOF
18 cat >>afile <<EOF
19 3
19 3
20 EOF
20 EOF
21 hg commit -m "0.3"
21 hg commit -m "0.3"
22 hg update -C 0
22 hg update -C 0
23 cat >>afile <<EOF
23 cat >>afile <<EOF
24 1
24 1
25 EOF
25 EOF
26 hg commit -m "1.1"
26 hg commit -m "1.1"
27 cat >>afile <<EOF
27 cat >>afile <<EOF
28 2
28 2
29 EOF
29 EOF
30 hg commit -m "1.2"
30 hg commit -m "1.2"
31 cat >fred <<EOF
31 cat >fred <<EOF
32 a line
32 a line
33 EOF
33 EOF
34 cat >>afile <<EOF
34 cat >>afile <<EOF
35 3
35 3
36 EOF
36 EOF
37 hg add fred
37 hg add fred
38 hg commit -m "1.3"
38 hg commit -m "1.3"
39 hg mv afile adifferentfile
39 hg mv afile adifferentfile
40 hg commit -m "1.3m"
40 hg commit -m "1.3m"
41 hg update -C 3
41 hg update -C 3
42 hg mv afile anotherfile
42 hg mv afile anotherfile
43 hg commit -m "0.3m"
43 hg commit -m "0.3m"
44 hg debugindex .hg/data/afile.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/00manifest.i
48 hg debugindex .hg/store/00manifest.i
49 hg verify
49 hg verify
50 cd ..
50 cd ..
51 for i in 0 1 2 3 4 5 6 7 8; do
51 for i in 0 1 2 3 4 5 6 7 8; do
52 mkdir test-"$i"
52 mkdir test-"$i"
53 hg --cwd test-"$i" init
53 hg --cwd test-"$i" init
54 hg -R test push -r "$i" test-"$i"
54 hg -R test push -r "$i" test-"$i"
55 cd test-"$i"
55 cd test-"$i"
56 hg verify
56 hg verify
57 cd ..
57 cd ..
58 done
58 done
59 cd test-8
59 cd test-8
60 hg pull ../test-7
60 hg pull ../test-7
61 hg verify
61 hg verify
@@ -1,27 +1,27
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir t
3 mkdir t
4 cd t
4 cd t
5 hg init
5 hg init
6 echo "[merge]" >> .hg/hgrc
6 echo "[merge]" >> .hg/hgrc
7 echo "followcopies = 1" >> .hg/hgrc
7 echo "followcopies = 1" >> .hg/hgrc
8 echo foo > a
8 echo foo > a
9 echo foo > a2
9 echo foo > a2
10 hg add a a2
10 hg add a a2
11 hg ci -m "start" -d "0 0"
11 hg ci -m "start" -d "0 0"
12 hg mv a b
12 hg mv a b
13 hg mv a2 b2
13 hg mv a2 b2
14 hg ci -m "rename" -d "0 0"
14 hg ci -m "rename" -d "0 0"
15 echo "checkout"
15 echo "checkout"
16 hg co 0
16 hg co 0
17 echo blahblah > a
17 echo blahblah > a
18 echo blahblah > a2
18 echo blahblah > a2
19 hg mv a2 c2
19 hg mv a2 c2
20 hg ci -m "modify" -d "0 0"
20 hg ci -m "modify" -d "0 0"
21 echo "merge"
21 echo "merge"
22 hg merge -y --debug
22 hg merge -y --debug
23 hg status -AC
23 hg status -AC
24 cat b
24 cat b
25 hg ci -m "merge" -d "0 0"
25 hg ci -m "merge" -d "0 0"
26 hg debugindex .hg/data/b.i
26 hg debugindex .hg/store/data/b.i
27 hg debugrename b No newline at end of file
27 hg debugrename b
@@ -1,99 +1,100
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # This test tries to exercise the ssh functionality with a dummy script
3 # This test tries to exercise the ssh functionality with a dummy script
4
4
5 cat <<'EOF' > dummyssh
5 cat <<'EOF' > dummyssh
6 #!/bin/sh
6 #!/bin/sh
7 # this attempts to deal with relative pathnames
7 # this attempts to deal with relative pathnames
8 cd `dirname $0`
8 cd `dirname $0`
9
9
10 # check for proper args
10 # check for proper args
11 if [ $1 != "user@dummy" ] ; then
11 if [ $1 != "user@dummy" ] ; then
12 exit -1
12 exit -1
13 fi
13 fi
14
14
15 # check that we're in the right directory
15 # check that we're in the right directory
16 if [ ! -x dummyssh ] ; then
16 if [ ! -x dummyssh ] ; then
17 exit -1
17 exit -1
18 fi
18 fi
19
19
20 SSH_CLIENT='127.0.0.1 1 2'
20 SSH_CLIENT='127.0.0.1 1 2'
21 export SSH_CLIENT
21 export SSH_CLIENT
22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
23 $2
23 $2
24 EOF
24 EOF
25 chmod +x dummyssh
25 chmod +x dummyssh
26
26
27 echo "# creating 'remote'"
27 echo "# creating 'remote'"
28 hg init remote
28 hg init remote
29 cd remote
29 cd remote
30 echo this > foo
30 echo this > foo
31 hg ci -A -m "init" -d "1000000 0" foo
31 echo this > fooO
32 hg ci -A -m "init" -d "1000000 0" foo fooO
32 echo '[server]' > .hg/hgrc
33 echo '[server]' > .hg/hgrc
33 echo 'uncompressed = True' >> .hg/hgrc
34 echo 'uncompressed = True' >> .hg/hgrc
34 echo '[hooks]' >> .hg/hgrc
35 echo '[hooks]' >> .hg/hgrc
35 echo 'changegroup = echo changegroup in remote: u=$HG_URL >> ../dummylog' >> .hg/hgrc
36 echo 'changegroup = echo changegroup in remote: u=$HG_URL >> ../dummylog' >> .hg/hgrc
36
37
37 cd ..
38 cd ..
38
39
39 echo "# repo not found error"
40 echo "# repo not found error"
40 hg clone -e ./dummyssh ssh://user@dummy/nonexistent local
41 hg clone -e ./dummyssh ssh://user@dummy/nonexistent local
41
42
42 echo "# clone remote via stream"
43 echo "# clone remote via stream"
43 hg clone -e ./dummyssh --uncompressed ssh://user@dummy/remote local-stream 2>&1 | \
44 hg clone -e ./dummyssh --uncompressed ssh://user@dummy/remote local-stream 2>&1 | \
44 sed -e 's/[0-9][0-9.]*/XXX/g' -e 's/[KM]\(B\/sec\)/X\1/'
45 sed -e 's/[0-9][0-9.]*/XXX/g' -e 's/[KM]\(B\/sec\)/X\1/'
45 cd local-stream
46 cd local-stream
46 hg verify
47 hg verify
47 cd ..
48 cd ..
48
49
49 echo "# clone remote via pull"
50 echo "# clone remote via pull"
50 hg clone -e ./dummyssh ssh://user@dummy/remote local
51 hg clone -e ./dummyssh ssh://user@dummy/remote local
51
52
52 echo "# verify"
53 echo "# verify"
53 cd local
54 cd local
54 hg verify
55 hg verify
55
56
56 echo '[hooks]' >> .hg/hgrc
57 echo '[hooks]' >> .hg/hgrc
57 echo 'changegroup = echo changegroup in local: u=$HG_URL >> ../dummylog' >> .hg/hgrc
58 echo 'changegroup = echo changegroup in local: u=$HG_URL >> ../dummylog' >> .hg/hgrc
58
59
59 echo "# empty default pull"
60 echo "# empty default pull"
60 hg paths
61 hg paths
61 hg pull -e ../dummyssh
62 hg pull -e ../dummyssh
62
63
63 echo "# local change"
64 echo "# local change"
64 echo bleah > foo
65 echo bleah > foo
65 hg ci -m "add" -d "1000000 0"
66 hg ci -m "add" -d "1000000 0"
66
67
67 echo "# updating rc"
68 echo "# updating rc"
68 echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
69 echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
69 echo "[ui]" >> .hg/hgrc
70 echo "[ui]" >> .hg/hgrc
70 echo "ssh = ../dummyssh" >> .hg/hgrc
71 echo "ssh = ../dummyssh" >> .hg/hgrc
71
72
72 echo "# find outgoing"
73 echo "# find outgoing"
73 hg out ssh://user@dummy/remote
74 hg out ssh://user@dummy/remote
74
75
75 echo "# find incoming on the remote side"
76 echo "# find incoming on the remote side"
76 hg incoming -R ../remote -e ../dummyssh ssh://user@dummy/local
77 hg incoming -R ../remote -e ../dummyssh ssh://user@dummy/local
77
78
78 echo "# push"
79 echo "# push"
79 hg push
80 hg push
80
81
81 cd ../remote
82 cd ../remote
82
83
83 echo "# check remote tip"
84 echo "# check remote tip"
84 hg tip
85 hg tip
85 hg verify
86 hg verify
86 hg cat -r tip foo
87 hg cat -r tip foo
87
88
88 echo z > z
89 echo z > z
89 hg ci -A -m z -d '1000001 0' z
90 hg ci -A -m z -d '1000001 0' z
90
91
91 cd ../local
92 cd ../local
92 echo r > r
93 echo r > r
93 hg ci -A -m z -d '1000002 0' r
94 hg ci -A -m z -d '1000002 0' r
94
95
95 echo "# push should succeed"
96 echo "# push should succeed"
96 hg push
97 hg push
97
98
98 cd ..
99 cd ..
99 cat dummylog
100 cat dummylog
@@ -1,99 +1,99
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # This test tries to exercise the ssh functionality with a dummy script
3 # This test tries to exercise the ssh functionality with a dummy script
4
4
5 cat <<'EOF' > dummyssh
5 cat <<'EOF' > dummyssh
6 #!/bin/sh
6 #!/bin/sh
7 # this attempts to deal with relative pathnames
7 # this attempts to deal with relative pathnames
8 cd `dirname $0`
8 cd `dirname $0`
9
9
10 # check for proper args
10 # check for proper args
11 if [ $1 != "user@dummy" ] ; then
11 if [ $1 != "user@dummy" ] ; then
12 exit -1
12 exit -1
13 fi
13 fi
14
14
15 # check that we're in the right directory
15 # check that we're in the right directory
16 if [ ! -x dummyssh ] ; then
16 if [ ! -x dummyssh ] ; then
17 exit -1
17 exit -1
18 fi
18 fi
19
19
20 SSH_CLIENT='127.0.0.1 1 2'
20 SSH_CLIENT='127.0.0.1 1 2'
21 export SSH_CLIENT
21 export SSH_CLIENT
22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
23 $2
23 $2
24 EOF
24 EOF
25 chmod +x dummyssh
25 chmod +x dummyssh
26
26
27 hg init remote
27 hg init remote
28 cd remote
28 cd remote
29 echo "# creating 'remote'"
29 echo "# creating 'remote'"
30 cat >>afile <<EOF
30 cat >>afile <<EOF
31 0
31 0
32 EOF
32 EOF
33 hg add afile
33 hg add afile
34 hg commit -m "0.0"
34 hg commit -m "0.0"
35 cat >>afile <<EOF
35 cat >>afile <<EOF
36 1
36 1
37 EOF
37 EOF
38 hg commit -m "0.1"
38 hg commit -m "0.1"
39 cat >>afile <<EOF
39 cat >>afile <<EOF
40 2
40 2
41 EOF
41 EOF
42 hg commit -m "0.2"
42 hg commit -m "0.2"
43 cat >>afile <<EOF
43 cat >>afile <<EOF
44 3
44 3
45 EOF
45 EOF
46 hg commit -m "0.3"
46 hg commit -m "0.3"
47 hg update -C 0
47 hg update -C 0
48 cat >>afile <<EOF
48 cat >>afile <<EOF
49 1
49 1
50 EOF
50 EOF
51 hg commit -m "1.1"
51 hg commit -m "1.1"
52 cat >>afile <<EOF
52 cat >>afile <<EOF
53 2
53 2
54 EOF
54 EOF
55 hg commit -m "1.2"
55 hg commit -m "1.2"
56 cat >fred <<EOF
56 cat >fred <<EOF
57 a line
57 a line
58 EOF
58 EOF
59 cat >>afile <<EOF
59 cat >>afile <<EOF
60 3
60 3
61 EOF
61 EOF
62 hg add fred
62 hg add fred
63 hg commit -m "1.3"
63 hg commit -m "1.3"
64 hg mv afile adifferentfile
64 hg mv afile adifferentfile
65 hg commit -m "1.3m"
65 hg commit -m "1.3m"
66 hg update -C 3
66 hg update -C 3
67 hg mv afile anotherfile
67 hg mv afile anotherfile
68 hg commit -m "0.3m"
68 hg commit -m "0.3m"
69 hg debugindex .hg/data/afile.i
69 hg debugindex .hg/store/data/afile.i
70 hg debugindex .hg/data/adifferentfile.i
70 hg debugindex .hg/store/data/adifferentfile.i
71 hg debugindex .hg/data/anotherfile.i
71 hg debugindex .hg/store/data/anotherfile.i
72 hg debugindex .hg/data/fred.i
72 hg debugindex .hg/store/data/fred.i
73 hg debugindex .hg/00manifest.i
73 hg debugindex .hg/store/00manifest.i
74 hg verify
74 hg verify
75 cd ..
75 cd ..
76
76
77 echo "# clone remote via stream"
77 echo "# clone remote via stream"
78 for i in 0 1 2 3 4 5 6 7 8; do
78 for i in 0 1 2 3 4 5 6 7 8; do
79 hg clone -e ./dummyssh --uncompressed -r "$i" ssh://user@dummy/remote test-"$i" 2>&1
79 hg clone -e ./dummyssh --uncompressed -r "$i" ssh://user@dummy/remote test-"$i" 2>&1
80 if cd test-"$i"; then
80 if cd test-"$i"; then
81 hg verify
81 hg verify
82 cd ..
82 cd ..
83 fi
83 fi
84 done
84 done
85 cd test-8
85 cd test-8
86 hg pull ../test-7
86 hg pull ../test-7
87 hg verify
87 hg verify
88 cd ..
88 cd ..
89 cd test-1
89 cd test-1
90 hg pull -e ../dummyssh -r 4 ssh://user@dummy/remote 2>&1
90 hg pull -e ../dummyssh -r 4 ssh://user@dummy/remote 2>&1
91 hg verify
91 hg verify
92 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
92 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
93 cd ..
93 cd ..
94 cd test-2
94 cd test-2
95 hg pull -e ../dummyssh -r 5 ssh://user@dummy/remote 2>&1
95 hg pull -e ../dummyssh -r 5 ssh://user@dummy/remote 2>&1
96 hg verify
96 hg verify
97 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
97 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
98 hg verify
98 hg verify
99 cd ..
99 cd ..
@@ -1,88 +1,88
1 # creating 'remote'
1 # creating 'remote'
2 # repo not found error
2 # repo not found error
3 remote: abort: repository nonexistent not found!
3 remote: abort: repository nonexistent not found!
4 abort: no suitable response from remote hg!
4 abort: no suitable response from remote hg!
5 # clone remote via stream
5 # clone remote via stream
6 streaming all changes
6 streaming all changes
7 XXX files to transfer, XXX bytes of data
7 XXX files to transfer, XXX bytes of data
8 transferred XXX bytes in XXX seconds (XXX XB/sec)
8 transferred XXX bytes in XXX seconds (XXX XB/sec)
9 XXX files updated, XXX files merged, XXX files removed, XXX files unresolved
9 XXX files updated, XXX files merged, XXX files removed, XXX files unresolved
10 checking changesets
10 checking changesets
11 checking manifests
11 checking manifests
12 crosschecking files in changesets and manifests
12 crosschecking files in changesets and manifests
13 checking files
13 checking files
14 1 files, 1 changesets, 1 total revisions
14 2 files, 1 changesets, 2 total revisions
15 # clone remote via pull
15 # clone remote via pull
16 requesting all changes
16 requesting all changes
17 adding changesets
17 adding changesets
18 adding manifests
18 adding manifests
19 adding file changes
19 adding file changes
20 added 1 changesets with 1 changes to 1 files
20 added 1 changesets with 2 changes to 2 files
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
21 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 # verify
22 # verify
23 checking changesets
23 checking changesets
24 checking manifests
24 checking manifests
25 crosschecking files in changesets and manifests
25 crosschecking files in changesets and manifests
26 checking files
26 checking files
27 1 files, 1 changesets, 1 total revisions
27 2 files, 1 changesets, 2 total revisions
28 # empty default pull
28 # empty default pull
29 default = ssh://user@dummy/remote
29 default = ssh://user@dummy/remote
30 pulling from ssh://user@dummy/remote
30 pulling from ssh://user@dummy/remote
31 searching for changes
31 searching for changes
32 no changes found
32 no changes found
33 # local change
33 # local change
34 # updating rc
34 # updating rc
35 # find outgoing
35 # find outgoing
36 searching for changes
36 searching for changes
37 changeset: 1:c54836a570be
37 changeset: 1:572896fe480d
38 tag: tip
38 tag: tip
39 user: test
39 user: test
40 date: Mon Jan 12 13:46:40 1970 +0000
40 date: Mon Jan 12 13:46:40 1970 +0000
41 summary: add
41 summary: add
42
42
43 # find incoming on the remote side
43 # find incoming on the remote side
44 searching for changes
44 searching for changes
45 changeset: 1:c54836a570be
45 changeset: 1:572896fe480d
46 tag: tip
46 tag: tip
47 user: test
47 user: test
48 date: Mon Jan 12 13:46:40 1970 +0000
48 date: Mon Jan 12 13:46:40 1970 +0000
49 summary: add
49 summary: add
50
50
51 # push
51 # push
52 pushing to ssh://user@dummy/remote
52 pushing to ssh://user@dummy/remote
53 searching for changes
53 searching for changes
54 remote: adding changesets
54 remote: adding changesets
55 remote: adding manifests
55 remote: adding manifests
56 remote: adding file changes
56 remote: adding file changes
57 remote: added 1 changesets with 1 changes to 1 files
57 remote: added 1 changesets with 1 changes to 1 files
58 # check remote tip
58 # check remote tip
59 changeset: 1:c54836a570be
59 changeset: 1:572896fe480d
60 tag: tip
60 tag: tip
61 user: test
61 user: test
62 date: Mon Jan 12 13:46:40 1970 +0000
62 date: Mon Jan 12 13:46:40 1970 +0000
63 summary: add
63 summary: add
64
64
65 checking changesets
65 checking changesets
66 checking manifests
66 checking manifests
67 crosschecking files in changesets and manifests
67 crosschecking files in changesets and manifests
68 checking files
68 checking files
69 1 files, 2 changesets, 2 total revisions
69 2 files, 2 changesets, 3 total revisions
70 bleah
70 bleah
71 # push should succeed
71 # push should succeed
72 pushing to ssh://user@dummy/remote
72 pushing to ssh://user@dummy/remote
73 searching for changes
73 searching for changes
74 note: unsynced remote changes!
74 note: unsynced remote changes!
75 remote: adding changesets
75 remote: adding changesets
76 remote: adding manifests
76 remote: adding manifests
77 remote: adding file changes
77 remote: adding file changes
78 remote: added 1 changesets with 1 changes to 1 files
78 remote: added 1 changesets with 1 changes to 1 files
79 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio 3: 4: 5:
79 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio 3: 4: 5:
80 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
80 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
81 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
81 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
82 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
82 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
83 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
83 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
84 Got arguments 1:user@dummy 2:hg -R local serve --stdio 3: 4: 5:
84 Got arguments 1:user@dummy 2:hg -R local serve --stdio 3: 4: 5:
85 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
85 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
86 changegroup in remote: u=remote:ssh:127.0.0.1
86 changegroup in remote: u=remote:ssh:127.0.0.1
87 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
87 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
88 changegroup in remote: u=remote:ssh:127.0.0.1
88 changegroup in remote: u=remote:ssh:127.0.0.1
General Comments 0
You need to be logged in to leave comments. Login now