Merge Benoit's .hg/store support
Matt Mackall
r3854:4f6db023 merge default
@@ -0,0 +1,14 b''
1 #!/bin/sh
2
3 mkdir t
4 cd t
5 hg init
6 echo a > a
7 hg add a
8 hg commit -m test -d "1000000 0"
9 rm .hg/requires
10 hg tip
11 echo indoor-pool > .hg/requires
12 hg tip
13
14 true
@@ -0,0 +1,2 b''
1 abort: index 00changelog.i unknown format 2!
2 abort: requirement 'indoor-pool' not supported!
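These two aborts cover both failure modes of the new layout. Removing .hg/requires makes hg treat the directory as an old-style repository, so it reads the stub 00changelog.i that store-enabled repos keep directly under .hg; that stub is written with version bytes '\0\0\0\2' (see the localrepo.py hunk below), an invalid revlog version that makes old or misdirected clients abort instead of silently misreading the repository. The second abort shows that an unknown entry in .hg/requires is rejected before any revlog is opened. A minimal sketch of that gate, assuming modern Python and a RuntimeError in place of hg's repo.RepoError:

    # Sketch of the requirements gate from localrepo.__init__ below.
    import os

    SUPPORTED = ('revlogv1', 'store')

    def check_requirements(hgdir):
        try:
            with open(os.path.join(hgdir, 'requires')) as fp:
                requirements = fp.read().splitlines()
        except IOError:
            requirements = []          # no requires file: old-style repo
        for r in requirements:
            if r not in SUPPORTED:     # e.g. 'indoor-pool'
                raise RuntimeError("requirement '%s' not supported" % r)
        return requirements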
@@ -1,257 +1,271 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from node import *
10 10 from repo import *
11 11 from demandload import *
12 12 from i18n import gettext as _
13 13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
14 14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
15 15
16 16 def _local(path):
17 17 return (os.path.isfile(util.drop_scheme('file', path)) and
18 18 bundlerepo or localrepo)
19 19
20 20 schemes = {
21 21 'bundle': bundlerepo,
22 22 'file': _local,
23 23 'hg': httprepo,
24 24 'http': httprepo,
25 25 'https': httprepo,
26 26 'old-http': statichttprepo,
27 27 'ssh': sshrepo,
28 28 'static-http': statichttprepo,
29 29 }
30 30
31 31 def _lookup(path):
32 32 scheme = 'file'
33 33 if path:
34 34 c = path.find(':')
35 35 if c > 0:
36 36 scheme = path[:c]
37 37 thing = schemes.get(scheme) or schemes['file']
38 38 try:
39 39 return thing(path)
40 40 except TypeError:
41 41 return thing
42 42
43 43 def islocal(repo):
44 44 '''return true if repo or path is local'''
45 45 if isinstance(repo, str):
46 46 try:
47 47 return _lookup(repo).islocal(repo)
48 48 except AttributeError:
49 49 return False
50 50 return repo.local()
51 51
52 52 repo_setup_hooks = []
53 53
54 54 def repository(ui, path='', create=False):
55 55 """return a repository object for the specified path"""
56 56 repo = _lookup(path).instance(ui, path, create)
57 57 for hook in repo_setup_hooks:
58 58 hook(ui, repo)
59 59 return repo
60 60
61 61 def defaultdest(source):
62 62 '''return default destination of clone if none is given'''
63 63 return os.path.basename(os.path.normpath(source))
64 64
65 65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
66 66 stream=False):
67 67 """Make a copy of an existing repository.
68 68
69 69 Create a copy of an existing repository in a new directory. The
70 70 source and destination are URLs, as passed to the repository
71 71 function. Returns a pair of repository objects, the source and
72 72 newly created destination.
73 73
74 74 The location of the source is added to the new repository's
75 75 .hg/hgrc file, as the default to be used for future pulls and
76 76 pushes.
77 77
78 78 If an exception is raised, the partly cloned/updated destination
79 79 repository will be deleted.
80 80
81 81 Arguments:
82 82
83 83 source: repository object or URL
84 84
85 85 dest: URL of destination repository to create (defaults to base
86 86 name of source repository)
87 87
88 88 pull: always pull from source repository, even in local case
89 89
90 90 stream: stream raw data uncompressed from repository (fast over
91 91 LAN, slow over WAN)
92 92
93 93 rev: revision to clone up to (implies pull=True)
94 94
95 95 update: update working directory after clone completes, if
96 96 destination is local repository
97 97 """
98 98 if isinstance(source, str):
99 99 src_repo = repository(ui, source)
100 100 else:
101 101 src_repo = source
102 102 source = src_repo.url()
103 103
104 104 if dest is None:
105 105 dest = defaultdest(source)
106 106 ui.status(_("destination directory: %s\n") % dest)
107 107
108 108 def localpath(path):
109 109 if path.startswith('file://'):
110 110 return path[7:]
111 111 if path.startswith('file:'):
112 112 return path[5:]
113 113 return path
114 114
115 115 dest = localpath(dest)
116 116 source = localpath(source)
117 117
118 118 if os.path.exists(dest):
119 119 raise util.Abort(_("destination '%s' already exists") % dest)
120 120
121 121 class DirCleanup(object):
122 122 def __init__(self, dir_):
123 123 self.rmtree = shutil.rmtree
124 124 self.dir_ = dir_
125 125 def close(self):
126 126 self.dir_ = None
127 127 def __del__(self):
128 128 if self.dir_:
129 129 self.rmtree(self.dir_, True)
130 130
131 dest_repo = repository(ui, dest, create=True)
132
133 131 dir_cleanup = None
134 if dest_repo.local():
135 dir_cleanup = DirCleanup(os.path.realpath(dest_repo.root))
132 if islocal(dest):
133 dir_cleanup = DirCleanup(dest)
136 134
137 135 abspath = source
138 136 copy = False
139 if src_repo.local() and dest_repo.local():
137 if src_repo.local() and islocal(dest):
140 138 abspath = os.path.abspath(source)
141 139 copy = not pull and not rev
142 140
143 141 src_lock, dest_lock = None, None
144 142 if copy:
145 143 try:
146 144 # we use a lock here because if we race with commit, we
147 145 # can end up with extra data in the cloned revlogs that's
148 146 # not pointed to by changesets, thus causing verify to
149 147 # fail
150 148 src_lock = src_repo.lock()
151 149 except lock.LockException:
152 150 copy = False
153 151
154 152 if copy:
155 # we lock here to avoid premature writing to the target
153 def force_copy(src, dst):
154 try:
155 util.copyfiles(src, dst)
156 except OSError, inst:
157 if inst.errno != errno.ENOENT:
158 raise
159
156 160 src_store = os.path.realpath(src_repo.spath)
157 dest_store = os.path.realpath(dest_repo.spath)
161 if not os.path.exists(dest):
162 os.mkdir(dest)
163 dest_path = os.path.realpath(os.path.join(dest, ".hg"))
164 os.mkdir(dest_path)
165 if src_repo.spath != src_repo.path:
166 dest_store = os.path.join(dest_path, "store")
167 os.mkdir(dest_store)
168 else:
169 dest_store = dest_path
170 # copy the requires file
171 force_copy(src_repo.join("requires"),
172 os.path.join(dest_path, "requires"))
173 # we lock here to avoid premature writing to the target
158 174 dest_lock = lock.lock(os.path.join(dest_store, "lock"))
159 175
160 176 files = ("data",
161 177 "00manifest.d", "00manifest.i",
162 178 "00changelog.d", "00changelog.i")
163 179 for f in files:
164 180 src = os.path.join(src_store, f)
165 181 dst = os.path.join(dest_store, f)
166 try:
167 util.copyfiles(src, dst)
168 except OSError, inst:
169 if inst.errno != errno.ENOENT:
170 raise
182 force_copy(src, dst)
171 183
172 184 # we need to re-init the repo after manually copying the data
173 185 # into it
174 186 dest_repo = repository(ui, dest)
175 187
176 188 else:
189 dest_repo = repository(ui, dest, create=True)
190
177 191 revs = None
178 192 if rev:
179 193 if 'lookup' not in src_repo.capabilities:
180 194 raise util.Abort(_("src repository does not support revision "
181 195 "lookup and so doesn't support clone by "
182 196 "revision"))
183 197 revs = [src_repo.lookup(r) for r in rev]
184 198
185 199 if dest_repo.local():
186 200 dest_repo.clone(src_repo, heads=revs, stream=stream)
187 201 elif src_repo.local():
188 202 src_repo.push(dest_repo, revs=revs)
189 203 else:
190 204 raise util.Abort(_("clone from remote to remote not supported"))
191 205
192 206 if src_lock:
193 207 src_lock.release()
194 208
195 209 if dest_repo.local():
196 210 fp = dest_repo.opener("hgrc", "w", text=True)
197 211 fp.write("[paths]\n")
198 212 fp.write("default = %s\n" % abspath)
199 213 fp.close()
200 214
201 215 if dest_lock:
202 216 dest_lock.release()
203 217
204 218 if update:
205 219 _update(dest_repo, dest_repo.changelog.tip())
206 220 if dir_cleanup:
207 221 dir_cleanup.close()
208 222
209 223 return src_repo, dest_repo
210 224
211 225 def _showstats(repo, stats):
212 226 stats = ((stats[0], _("updated")),
213 227 (stats[1], _("merged")),
214 228 (stats[2], _("removed")),
215 229 (stats[3], _("unresolved")))
216 230 note = ", ".join([_("%d files %s") % s for s in stats])
217 231 repo.ui.status("%s\n" % note)
218 232
219 233 def _update(repo, node): return update(repo, node)
220 234
221 235 def update(repo, node):
222 236 """update the working directory to node, merging linear changes"""
223 237 stats = _merge.update(repo, node, False, False, None, None)
224 238 _showstats(repo, stats)
225 239 if stats[3]:
226 240 repo.ui.status(_("There are unresolved merges with"
227 241 " locally modified files.\n"))
228 242 return stats[3]
229 243
230 244 def clean(repo, node, wlock=None, show_stats=True):
231 245 """forcibly switch the working directory to node, clobbering changes"""
232 246 stats = _merge.update(repo, node, False, True, None, wlock)
233 247 if show_stats: _showstats(repo, stats)
234 248 return stats[3]
235 249
236 250 def merge(repo, node, force=None, remind=True, wlock=None):
237 251 """branch merge with node, resolving changes"""
238 252 stats = _merge.update(repo, node, True, force, False, wlock)
239 253 _showstats(repo, stats)
240 254 if stats[3]:
241 255 pl = repo.parents()
242 256 repo.ui.status(_("There are unresolved merges,"
243 257 " you can redo the full merge using:\n"
244 258 " hg update -C %s\n"
245 259 " hg merge %s\n")
246 260 % (pl[0].rev(), pl[1].rev()))
247 261 elif remind:
248 262 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
249 263 return stats[3]
250 264
251 265 def revert(repo, node, choose, wlock):
252 266 """revert changes to revision in node without updating dirstate"""
253 267 return _merge.update(repo, node, False, True, choose, wlock)[3]
254 268
255 269 def verify(repo):
256 270 """verify the consistency of a repository"""
257 271 return _verify.verify(repo)
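The clone change is the heart of this hunk: a local copy-clone can no longer let repository(ui, dest, create=True) build the destination first, because creation now writes a requires file and a stub changelog that the copied store would conflict with. Instead the copy branch builds .hg (and .hg/store when the source has one) by hand, copies the requires file, copies the revlogs under the store lock, and only then re-instantiates the repo object; the create=True call moves into the else branch. force_copy simply tolerates a missing source, since e.g. data/ does not exist in an empty repository. A rough standalone equivalent, using plain shutil instead of hg's util.copyfiles (which also preserves hardlinks):

    # Copy a store file or directory, ignoring a missing source.
    import errno, os, shutil

    def force_copy(src, dst):
        try:
            if os.path.isdir(src):
                shutil.copytree(src, dst)
            else:
                shutil.copy(src, dst)
        except (OSError, IOError) as inst:
            if inst.errno != errno.ENOENT:
                raise              # only "source does not exist" is fine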
@@ -1,61 +1,63 b''
1 1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os, mimetypes
10 10 import os.path
11 11
12 12 def get_mtime(repo_path):
13 hg_path = os.path.join(repo_path, ".hg")
14 cl_path = os.path.join(hg_path, "00changelog.i")
15 if os.path.exists(os.path.join(cl_path)):
13 store_path = os.path.join(repo_path, ".hg")
14 if not os.path.isdir(os.path.join(store_path, "data")):
15 store_path = os.path.join(store_path, "store")
16 cl_path = os.path.join(store_path, "00changelog.i")
17 if os.path.exists(cl_path):
16 18 return os.stat(cl_path).st_mtime
17 19 else:
18 return os.stat(hg_path).st_mtime
20 return os.stat(store_path).st_mtime
19 21
20 22 def staticfile(directory, fname, req):
21 23 """return a file inside directory with guessed content-type header
22 24
23 25 fname always uses '/' as directory separator and isn't allowed to
24 26 contain unusual path components.
25 27 Content-type is guessed using the mimetypes module.
26 28 Return an empty string if fname is illegal or file not found.
27 29
28 30 """
29 31 parts = fname.split('/')
30 32 path = directory
31 33 for part in parts:
32 34 if (part in ('', os.curdir, os.pardir) or
33 35 os.sep in part or os.altsep is not None and os.altsep in part):
34 36 return ""
35 37 path = os.path.join(path, part)
36 38 try:
37 39 os.stat(path)
38 40 ct = mimetypes.guess_type(path)[0] or "text/plain"
39 41 req.header([('Content-type', ct),
40 42 ('Content-length', os.path.getsize(path))])
41 43 return file(path, 'rb').read()
42 44 except (TypeError, OSError):
43 45 # illegal fname or unreadable file
44 46 return ""
45 47
46 48 def style_map(templatepath, style):
47 49 """Return path to mapfile for a given style.
48 50
49 51 Searches mapfile in the following locations:
50 52 1. templatepath/style/map
51 53 2. templatepath/map-style
52 54 3. templatepath/map
53 55 """
54 56 locations = style and [os.path.join(style, "map"), "map-"+style] or []
55 57 locations.append("map")
56 58 for location in locations:
57 59 mapfile = os.path.join(templatepath, location)
58 60 if os.path.isfile(mapfile):
59 61 return mapfile
60 62 raise RuntimeError("No hgweb templates found in %r" % templatepath)
61 63
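get_mtime now has to discover which layout it is stat'ing: if .hg/data exists the repository predates the store and the changelog lives directly under .hg, otherwise it descends into .hg/store first. For a repository at /repo the two candidate paths are:

    /repo/.hg/00changelog.i          # old layout (.hg/data present)
    /repo/.hg/store/00changelog.i    # store layout

The fallback to the directory's own mtime is kept so the function still works for a freshly created, empty repository.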
@@ -1,1935 +1,1964 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 supported = ('revlogv1', 'store')
19 20
20 21 def __del__(self):
21 22 self.transhandle = None
22 23 def __init__(self, parentui, path=None, create=0):
23 24 repo.repository.__init__(self)
24 25 if not path:
25 26 p = os.getcwd()
26 27 while not os.path.isdir(os.path.join(p, ".hg")):
27 28 oldp = p
28 29 p = os.path.dirname(p)
29 30 if p == oldp:
30 31 raise repo.RepoError(_("There is no Mercurial repository"
31 32 " here (.hg not found)"))
32 33 path = p
34
33 35 self.path = os.path.join(path, ".hg")
34 self.spath = self.path
36 self.root = os.path.realpath(path)
37 self.origroot = path
38 self.opener = util.opener(self.path)
39 self.wopener = util.opener(self.root)
35 40
36 41 if not os.path.isdir(self.path):
37 42 if create:
38 43 if not os.path.exists(path):
39 44 os.mkdir(path)
40 45 os.mkdir(self.path)
41 if self.spath != self.path:
42 os.mkdir(self.spath)
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements = ("revlogv1", "store")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
50 reqfile.write("%s\n" % r)
51 reqfile.close()
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write('\0\0\0\2')
43 54 else:
44 55 raise repo.RepoError(_("repository %s not found") % path)
45 56 elif create:
46 57 raise repo.RepoError(_("repository %s already exists") % path)
58 else:
59 # find requirements
60 try:
61 requirements = self.opener("requires").read().splitlines()
62 except IOError, inst:
63 if inst.errno != errno.ENOENT:
64 raise
65 requirements = []
66 # check them
67 for r in requirements:
68 if r not in self.supported:
69 raise repo.RepoError(_("requirement '%s' not supported") % r)
47 70
48 self.root = os.path.realpath(path)
49 self.origroot = path
71 # setup store
72 if "store" in requirements:
73 self.encodefn = util.encodefilename
74 self.decodefn = util.decodefilename
75 self.spath = os.path.join(self.path, "store")
76 else:
77 self.encodefn = lambda x: x
78 self.decodefn = lambda x: x
79 self.spath = self.path
80 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
81
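util.encodedopener wraps an ordinary opener so every store filename passes through encodefn before touching the filesystem; with the 'store' requirement that is util.encodefilename/decodefilename (not shown in this diff), which exist so that tracked filenames differing only in case do not collide on case-insensitive filesystems. A toy round-trip in the same spirit — the rule here (uppercase escaped as '_' plus the lowercase letter, '_' doubled) is only part of hg's real table, which also escapes reserved characters:

    # Toy codec: encode/decode must be exact inverses, or the store
    # could not map on-disk names back to tracked filenames.
    def encodefilename(s):
        return ''.join('__' if c == '_'
                       else '_' + c.lower() if c.isupper()
                       else c for c in s)

    def decodefilename(s):
        out, i = [], 0
        while i < len(s):
            if s[i] == '_':
                out.append('_' if s[i + 1] == '_' else s[i + 1].upper())
                i += 2
            else:
                out.append(s[i])
                i += 1
        return ''.join(out)

    assert encodefilename('Makefile.SPARC') == '_makefile._s_p_a_r_c'
    assert decodefilename(encodefilename('Foo_Bar')) == 'Foo_Bar'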
50 82 self.ui = ui.ui(parentui=parentui)
51 self.opener = util.opener(self.path)
52 self.sopener = util.opener(self.spath)
53 self.wopener = util.opener(self.root)
54
55 83 try:
56 84 self.ui.readconfig(self.join("hgrc"), self.root)
57 85 except IOError:
58 86 pass
59 87
60 88 v = self.ui.configrevlog()
61 89 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
62 90 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
63 91 fl = v.get('flags', None)
64 92 flags = 0
65 93 if fl != None:
66 94 for x in fl.split():
67 95 flags |= revlog.flagstr(x)
68 96 elif self.revlogv1:
69 97 flags = revlog.REVLOG_DEFAULT_FLAGS
70 98
71 99 v = self.revlogversion | flags
72 100 self.manifest = manifest.manifest(self.sopener, v)
73 101 self.changelog = changelog.changelog(self.sopener, v)
74 102
75 103 fallback = self.ui.config('ui', 'fallbackencoding')
76 104 if fallback:
77 105 util._fallbackencoding = fallback
78 106
79 107 # the changelog might not have the inline index flag
80 108 # on. If the format of the changelog is the same as found in
81 109 # .hgrc, apply any flags found in the .hgrc as well.
82 110 # Otherwise, just version from the changelog
83 111 v = self.changelog.version
84 112 if v == self.revlogversion:
85 113 v |= flags
86 114 self.revlogversion = v
87 115
88 116 self.tagscache = None
89 117 self.branchcache = None
90 118 self.nodetagscache = None
91 119 self.encodepats = None
92 120 self.decodepats = None
93 121 self.transhandle = None
94 122
95 123 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
96 124
97 125 def url(self):
98 126 return 'file:' + self.root
99 127
100 128 def hook(self, name, throw=False, **args):
101 129 def callhook(hname, funcname):
102 130 '''call python hook. hook is callable object, looked up as
103 131 name in python module. if callable returns "true", hook
104 132 fails, else passes. if hook raises exception, treated as
105 133 hook failure. exception propagates if throw is "true".
106 134
107 135 reason for "true" meaning "hook failed" is so that
108 136 unmodified commands (e.g. mercurial.commands.update) can
109 137 be run as hooks without wrappers to convert return values.'''
110 138
111 139 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
112 140 d = funcname.rfind('.')
113 141 if d == -1:
114 142 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
115 143 % (hname, funcname))
116 144 modname = funcname[:d]
117 145 try:
118 146 obj = __import__(modname)
119 147 except ImportError:
120 148 try:
121 149 # extensions are loaded with hgext_ prefix
122 150 obj = __import__("hgext_%s" % modname)
123 151 except ImportError:
124 152 raise util.Abort(_('%s hook is invalid '
125 153 '(import of "%s" failed)') %
126 154 (hname, modname))
127 155 try:
128 156 for p in funcname.split('.')[1:]:
129 157 obj = getattr(obj, p)
130 158 except AttributeError, err:
131 159 raise util.Abort(_('%s hook is invalid '
132 160 '("%s" is not defined)') %
133 161 (hname, funcname))
134 162 if not callable(obj):
135 163 raise util.Abort(_('%s hook is invalid '
136 164 '("%s" is not callable)') %
137 165 (hname, funcname))
138 166 try:
139 167 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
140 168 except (KeyboardInterrupt, util.SignalInterrupt):
141 169 raise
142 170 except Exception, exc:
143 171 if isinstance(exc, util.Abort):
144 172 self.ui.warn(_('error: %s hook failed: %s\n') %
145 173 (hname, exc.args[0]))
146 174 else:
147 175 self.ui.warn(_('error: %s hook raised an exception: '
148 176 '%s\n') % (hname, exc))
149 177 if throw:
150 178 raise
151 179 self.ui.print_exc()
152 180 return True
153 181 if r:
154 182 if throw:
155 183 raise util.Abort(_('%s hook failed') % hname)
156 184 self.ui.warn(_('warning: %s hook failed\n') % hname)
157 185 return r
158 186
159 187 def runhook(name, cmd):
160 188 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
161 189 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
162 190 r = util.system(cmd, environ=env, cwd=self.root)
163 191 if r:
164 192 desc, r = util.explain_exit(r)
165 193 if throw:
166 194 raise util.Abort(_('%s hook %s') % (name, desc))
167 195 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
168 196 return r
169 197
170 198 r = False
171 199 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
172 200 if hname.split(".", 1)[0] == name and cmd]
173 201 hooks.sort()
174 202 for hname, cmd in hooks:
175 203 if cmd.startswith('python:'):
176 204 r = callhook(hname, cmd[7:].strip()) or r
177 205 else:
178 206 r = runhook(hname, cmd) or r
179 207 return r
180 208
181 209 tag_disallowed = ':\r\n'
182 210
183 211 def tag(self, name, node, message, local, user, date):
184 212 '''tag a revision with a symbolic name.
185 213
186 214 if local is True, the tag is stored in a per-repository file.
187 215 otherwise, it is stored in the .hgtags file, and a new
188 216 changeset is committed with the change.
189 217
190 218 keyword arguments:
191 219
192 220 local: whether to store tag in non-version-controlled file
193 221 (default False)
194 222
195 223 message: commit message to use if committing
196 224
197 225 user: name of user to use if committing
198 226
199 227 date: date tuple to use if committing'''
200 228
201 229 for c in self.tag_disallowed:
202 230 if c in name:
203 231 raise util.Abort(_('%r cannot be used in a tag name') % c)
204 232
205 233 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
206 234
207 235 if local:
208 236 # local tags are stored in the current charset
209 237 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
210 238 self.hook('tag', node=hex(node), tag=name, local=local)
211 239 return
212 240
213 241 for x in self.status()[:5]:
214 242 if '.hgtags' in x:
215 243 raise util.Abort(_('working copy of .hgtags is changed '
216 244 '(please commit .hgtags manually)'))
217 245
218 246 # committed tags are stored in UTF-8
219 247 line = '%s %s\n' % (hex(node), util.fromlocal(name))
220 248 self.wfile('.hgtags', 'ab').write(line)
221 249 if self.dirstate.state('.hgtags') == '?':
222 250 self.add(['.hgtags'])
223 251
224 252 self.commit(['.hgtags'], message, user, date)
225 253 self.hook('tag', node=hex(node), tag=name, local=local)
226 254
227 255 def tags(self):
228 256 '''return a mapping of tag to node'''
229 257 if not self.tagscache:
230 258 self.tagscache = {}
231 259
232 260 def parsetag(line, context):
233 261 if not line:
234 262 return
235 263 s = l.split(" ", 1)
236 264 if len(s) != 2:
237 265 self.ui.warn(_("%s: cannot parse entry\n") % context)
238 266 return
239 267 node, key = s
240 268 key = util.tolocal(key.strip()) # stored in UTF-8
241 269 try:
242 270 bin_n = bin(node)
243 271 except TypeError:
244 272 self.ui.warn(_("%s: node '%s' is not well formed\n") %
245 273 (context, node))
246 274 return
247 275 if bin_n not in self.changelog.nodemap:
248 276 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
249 277 (context, key))
250 278 return
251 279 self.tagscache[key] = bin_n
252 280
253 281 # read the tags file from each head, ending with the tip,
254 282 # and add each tag found to the map, with "newer" ones
255 283 # taking precedence
256 284 f = None
257 285 for rev, node, fnode in self._hgtagsnodes():
258 286 f = (f and f.filectx(fnode) or
259 287 self.filectx('.hgtags', fileid=fnode))
260 288 count = 0
261 289 for l in f.data().splitlines():
262 290 count += 1
263 291 parsetag(l, _("%s, line %d") % (str(f), count))
264 292
265 293 try:
266 294 f = self.opener("localtags")
267 295 count = 0
268 296 for l in f:
269 297 # localtags are stored in the local character set
270 298 # while the internal tag table is stored in UTF-8
271 299 l = util.fromlocal(l)
272 300 count += 1
273 301 parsetag(l, _("localtags, line %d") % count)
274 302 except IOError:
275 303 pass
276 304
277 305 self.tagscache['tip'] = self.changelog.tip()
278 306
279 307 return self.tagscache
280 308
281 309 def _hgtagsnodes(self):
282 310 heads = self.heads()
283 311 heads.reverse()
284 312 last = {}
285 313 ret = []
286 314 for node in heads:
287 315 c = self.changectx(node)
288 316 rev = c.rev()
289 317 try:
290 318 fnode = c.filenode('.hgtags')
291 319 except repo.LookupError:
292 320 continue
293 321 ret.append((rev, node, fnode))
294 322 if fnode in last:
295 323 ret[last[fnode]] = None
296 324 last[fnode] = len(ret) - 1
297 325 return [item for item in ret if item]
298 326
299 327 def tagslist(self):
300 328 '''return a list of tags ordered by revision'''
301 329 l = []
302 330 for t, n in self.tags().items():
303 331 try:
304 332 r = self.changelog.rev(n)
305 333 except:
306 334 r = -2 # sort to the beginning of the list if unknown
307 335 l.append((r, t, n))
308 336 l.sort()
309 337 return [(t, n) for r, t, n in l]
310 338
311 339 def nodetags(self, node):
312 340 '''return the tags associated with a node'''
313 341 if not self.nodetagscache:
314 342 self.nodetagscache = {}
315 343 for t, n in self.tags().items():
316 344 self.nodetagscache.setdefault(n, []).append(t)
317 345 return self.nodetagscache.get(node, [])
318 346
319 347 def _branchtags(self):
320 348 partial, last, lrev = self._readbranchcache()
321 349
322 350 tiprev = self.changelog.count() - 1
323 351 if lrev != tiprev:
324 352 self._updatebranchcache(partial, lrev+1, tiprev+1)
325 353 self._writebranchcache(partial, self.changelog.tip(), tiprev)
326 354
327 355 return partial
328 356
329 357 def branchtags(self):
330 358 if self.branchcache is not None:
331 359 return self.branchcache
332 360
333 361 self.branchcache = {} # avoid recursion in changectx
334 362 partial = self._branchtags()
335 363
336 364 # the branch cache is stored on disk as UTF-8, but in the local
337 365 # charset internally
338 366 for k, v in partial.items():
339 367 self.branchcache[util.tolocal(k)] = v
340 368 return self.branchcache
341 369
342 370 def _readbranchcache(self):
343 371 partial = {}
344 372 try:
345 373 f = self.opener("branches.cache")
346 374 lines = f.read().split('\n')
347 375 f.close()
348 376 last, lrev = lines.pop(0).rstrip().split(" ", 1)
349 377 last, lrev = bin(last), int(lrev)
350 378 if not (lrev < self.changelog.count() and
351 379 self.changelog.node(lrev) == last): # sanity check
352 380 # invalidate the cache
353 381 raise ValueError('Invalid branch cache: unknown tip')
354 382 for l in lines:
355 383 if not l: continue
356 384 node, label = l.rstrip().split(" ", 1)
357 385 partial[label] = bin(node)
358 386 except (KeyboardInterrupt, util.SignalInterrupt):
359 387 raise
360 388 except Exception, inst:
361 389 if self.ui.debugflag:
362 390 self.ui.warn(str(inst), '\n')
363 391 partial, last, lrev = {}, nullid, nullrev
364 392 return partial, last, lrev
365 393
366 394 def _writebranchcache(self, branches, tip, tiprev):
367 395 try:
368 396 f = self.opener("branches.cache", "w")
369 397 f.write("%s %s\n" % (hex(tip), tiprev))
370 398 for label, node in branches.iteritems():
371 399 f.write("%s %s\n" % (hex(node), label))
372 400 except IOError:
373 401 pass
374 402
375 403 def _updatebranchcache(self, partial, start, end):
376 404 for r in xrange(start, end):
377 405 c = self.changectx(r)
378 406 b = c.branch()
379 407 if b:
380 408 partial[b] = c.node()
381 409
382 410 def lookup(self, key):
383 411 if key == '.':
384 412 key = self.dirstate.parents()[0]
385 413 if key == nullid:
386 414 raise repo.RepoError(_("no revision checked out"))
387 415 elif key == 'null':
388 416 return nullid
389 417 n = self.changelog._match(key)
390 418 if n:
391 419 return n
392 420 if key in self.tags():
393 421 return self.tags()[key]
394 422 if key in self.branchtags():
395 423 return self.branchtags()[key]
396 424 n = self.changelog._partialmatch(key)
397 425 if n:
398 426 return n
399 427 raise repo.RepoError(_("unknown revision '%s'") % key)
400 428
401 429 def dev(self):
402 430 return os.lstat(self.path).st_dev
403 431
404 432 def local(self):
405 433 return True
406 434
407 435 def join(self, f):
408 436 return os.path.join(self.path, f)
409 437
410 438 def sjoin(self, f):
439 f = self.encodefn(f)
411 440 return os.path.join(self.spath, f)
412 441
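Note that sjoin is now the single choke point where a logical store path becomes an on-disk path: under the codec sketched above, sjoin('data/Foo.i') would yield .hg/store/data/_foo.i, while for old-layout repos encodefn is the identity and spath is .hg itself. Everything in this file that touches revlogs, the journal, undo files or the store lock was already routed through sjoin and sopener, which is why relocating the store costs so few changes below.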
413 442 def wjoin(self, f):
414 443 return os.path.join(self.root, f)
415 444
416 445 def file(self, f):
417 446 if f[0] == '/':
418 447 f = f[1:]
419 448 return filelog.filelog(self.sopener, f, self.revlogversion)
420 449
421 450 def changectx(self, changeid=None):
422 451 return context.changectx(self, changeid)
423 452
424 453 def workingctx(self):
425 454 return context.workingctx(self)
426 455
427 456 def parents(self, changeid=None):
428 457 '''
429 458 get list of changectxs for parents of changeid or working directory
430 459 '''
431 460 if changeid is None:
432 461 pl = self.dirstate.parents()
433 462 else:
434 463 n = self.changelog.lookup(changeid)
435 464 pl = self.changelog.parents(n)
436 465 if pl[1] == nullid:
437 466 return [self.changectx(pl[0])]
438 467 return [self.changectx(pl[0]), self.changectx(pl[1])]
439 468
440 469 def filectx(self, path, changeid=None, fileid=None):
441 470 """changeid can be a changeset revision, node, or tag.
442 471 fileid can be a file revision or node."""
443 472 return context.filectx(self, path, changeid, fileid)
444 473
445 474 def getcwd(self):
446 475 return self.dirstate.getcwd()
447 476
448 477 def wfile(self, f, mode='r'):
449 478 return self.wopener(f, mode)
450 479
451 480 def wread(self, filename):
452 481 if self.encodepats == None:
453 482 l = []
454 483 for pat, cmd in self.ui.configitems("encode"):
455 484 mf = util.matcher(self.root, "", [pat], [], [])[1]
456 485 l.append((mf, cmd))
457 486 self.encodepats = l
458 487
459 488 data = self.wopener(filename, 'r').read()
460 489
461 490 for mf, cmd in self.encodepats:
462 491 if mf(filename):
463 492 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
464 493 data = util.filter(data, cmd)
465 494 break
466 495
467 496 return data
468 497
469 498 def wwrite(self, filename, data, fd=None):
470 499 if self.decodepats == None:
471 500 l = []
472 501 for pat, cmd in self.ui.configitems("decode"):
473 502 mf = util.matcher(self.root, "", [pat], [], [])[1]
474 503 l.append((mf, cmd))
475 504 self.decodepats = l
476 505
477 506 for mf, cmd in self.decodepats:
478 507 if mf(filename):
479 508 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
480 509 data = util.filter(data, cmd)
481 510 break
482 511
483 512 if fd:
484 513 return fd.write(data)
485 514 return self.wopener(filename, 'w').write(data)
486 515
487 516 def transaction(self):
488 517 tr = self.transhandle
489 518 if tr != None and tr.running():
490 519 return tr.nest()
491 520
492 521 # save dirstate for rollback
493 522 try:
494 523 ds = self.opener("dirstate").read()
495 524 except IOError:
496 525 ds = ""
497 526 self.opener("journal.dirstate", "w").write(ds)
498 527
499 528 renames = [(self.sjoin("journal"), self.sjoin("undo")),
500 529 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
501 530 tr = transaction.transaction(self.ui.warn, self.sopener,
502 531 self.sjoin("journal"),
503 532 aftertrans(renames))
504 533 self.transhandle = tr
505 534 return tr
506 535
507 536 def recover(self):
508 537 l = self.lock()
509 538 if os.path.exists(self.sjoin("journal")):
510 539 self.ui.status(_("rolling back interrupted transaction\n"))
511 540 transaction.rollback(self.sopener, self.sjoin("journal"))
512 541 self.reload()
513 542 return True
514 543 else:
515 544 self.ui.warn(_("no interrupted transaction available\n"))
516 545 return False
517 546
518 547 def rollback(self, wlock=None):
519 548 if not wlock:
520 549 wlock = self.wlock()
521 550 l = self.lock()
522 551 if os.path.exists(self.sjoin("undo")):
523 552 self.ui.status(_("rolling back last transaction\n"))
524 553 transaction.rollback(self.sopener, self.sjoin("undo"))
525 554 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
526 555 self.reload()
527 556 self.wreload()
528 557 else:
529 558 self.ui.warn(_("no rollback information available\n"))
530 559
531 560 def wreload(self):
532 561 self.dirstate.read()
533 562
534 563 def reload(self):
535 564 self.changelog.load()
536 565 self.manifest.load()
537 566 self.tagscache = None
538 567 self.nodetagscache = None
539 568
540 569 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
541 570 desc=None):
542 571 try:
543 572 l = lock.lock(lockname, 0, releasefn, desc=desc)
544 573 except lock.LockHeld, inst:
545 574 if not wait:
546 575 raise
547 576 self.ui.warn(_("waiting for lock on %s held by %r\n") %
548 577 (desc, inst.locker))
549 578 # default to 600 seconds timeout
550 579 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
551 580 releasefn, desc=desc)
552 581 if acquirefn:
553 582 acquirefn()
554 583 return l
555 584
556 585 def lock(self, wait=1):
557 586 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
558 587 desc=_('repository %s') % self.origroot)
559 588
560 589 def wlock(self, wait=1):
561 590 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
562 591 self.wreload,
563 592 desc=_('working directory of %s') % self.origroot)
564 593
565 594 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
566 595 """
567 596 commit an individual file as part of a larger transaction
568 597 """
569 598
570 599 t = self.wread(fn)
571 600 fl = self.file(fn)
572 601 fp1 = manifest1.get(fn, nullid)
573 602 fp2 = manifest2.get(fn, nullid)
574 603
575 604 meta = {}
576 605 cp = self.dirstate.copied(fn)
577 606 if cp:
578 607 meta["copy"] = cp
579 608 if not manifest2: # not a branch merge
580 609 meta["copyrev"] = hex(manifest1.get(cp, nullid))
581 610 fp2 = nullid
582 611 elif fp2 != nullid: # copied on remote side
583 612 meta["copyrev"] = hex(manifest1.get(cp, nullid))
584 613 elif fp1 != nullid: # copied on local side, reversed
585 614 meta["copyrev"] = hex(manifest2.get(cp))
586 615 fp2 = nullid
587 616 else: # directory rename
588 617 meta["copyrev"] = hex(manifest1.get(cp, nullid))
589 618 self.ui.debug(_(" %s: copy %s:%s\n") %
590 619 (fn, cp, meta["copyrev"]))
591 620 fp1 = nullid
592 621 elif fp2 != nullid:
593 622 # is one parent an ancestor of the other?
594 623 fpa = fl.ancestor(fp1, fp2)
595 624 if fpa == fp1:
596 625 fp1, fp2 = fp2, nullid
597 626 elif fpa == fp2:
598 627 fp2 = nullid
599 628
600 629 # is the file unmodified from the parent? report existing entry
601 630 if fp2 == nullid and not fl.cmp(fp1, t):
602 631 return fp1
603 632
604 633 changelist.append(fn)
605 634 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
606 635
607 636 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
608 637 if p1 is None:
609 638 p1, p2 = self.dirstate.parents()
610 639 return self.commit(files=files, text=text, user=user, date=date,
611 640 p1=p1, p2=p2, wlock=wlock)
612 641
613 642 def commit(self, files=None, text="", user=None, date=None,
614 643 match=util.always, force=False, lock=None, wlock=None,
615 644 force_editor=False, p1=None, p2=None, extra={}):
616 645
617 646 commit = []
618 647 remove = []
619 648 changed = []
620 649 use_dirstate = (p1 is None) # not rawcommit
621 650 extra = extra.copy()
622 651
623 652 if use_dirstate:
624 653 if files:
625 654 for f in files:
626 655 s = self.dirstate.state(f)
627 656 if s in 'nmai':
628 657 commit.append(f)
629 658 elif s == 'r':
630 659 remove.append(f)
631 660 else:
632 661 self.ui.warn(_("%s not tracked!\n") % f)
633 662 else:
634 663 changes = self.status(match=match)[:5]
635 664 modified, added, removed, deleted, unknown = changes
636 665 commit = modified + added
637 666 remove = removed
638 667 else:
639 668 commit = files
640 669
641 670 if use_dirstate:
642 671 p1, p2 = self.dirstate.parents()
643 672 update_dirstate = True
644 673 else:
645 674 p1, p2 = p1, p2 or nullid
646 675 update_dirstate = (self.dirstate.parents()[0] == p1)
647 676
648 677 c1 = self.changelog.read(p1)
649 678 c2 = self.changelog.read(p2)
650 679 m1 = self.manifest.read(c1[0]).copy()
651 680 m2 = self.manifest.read(c2[0])
652 681
653 682 if use_dirstate:
654 683 branchname = util.fromlocal(self.workingctx().branch())
655 684 else:
656 685 branchname = ""
657 686
658 687 if use_dirstate:
659 688 oldname = c1[5].get("branch", "") # stored in UTF-8
660 689 if not commit and not remove and not force and p2 == nullid and \
661 690 branchname == oldname:
662 691 self.ui.status(_("nothing changed\n"))
663 692 return None
664 693
665 694 xp1 = hex(p1)
666 695 if p2 == nullid: xp2 = ''
667 696 else: xp2 = hex(p2)
668 697
669 698 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
670 699
671 700 if not wlock:
672 701 wlock = self.wlock()
673 702 if not lock:
674 703 lock = self.lock()
675 704 tr = self.transaction()
676 705
677 706 # check in files
678 707 new = {}
679 708 linkrev = self.changelog.count()
680 709 commit.sort()
681 710 for f in commit:
682 711 self.ui.note(f + "\n")
683 712 try:
684 713 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
685 714 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
686 715 except IOError:
687 716 if use_dirstate:
688 717 self.ui.warn(_("trouble committing %s!\n") % f)
689 718 raise
690 719 else:
691 720 remove.append(f)
692 721
693 722 # update manifest
694 723 m1.update(new)
695 724 remove.sort()
696 725
697 726 for f in remove:
698 727 if f in m1:
699 728 del m1[f]
700 729 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
701 730
702 731 # add changeset
703 732 new = new.keys()
704 733 new.sort()
705 734
706 735 user = user or self.ui.username()
707 736 if not text or force_editor:
708 737 edittext = []
709 738 if text:
710 739 edittext.append(text)
711 740 edittext.append("")
712 741 edittext.append("HG: user: %s" % user)
713 742 if p2 != nullid:
714 743 edittext.append("HG: branch merge")
715 744 edittext.extend(["HG: changed %s" % f for f in changed])
716 745 edittext.extend(["HG: removed %s" % f for f in remove])
717 746 if not changed and not remove:
718 747 edittext.append("HG: no files changed")
719 748 edittext.append("")
720 749 # run editor in the repository root
721 750 olddir = os.getcwd()
722 751 os.chdir(self.root)
723 752 text = self.ui.edit("\n".join(edittext), user)
724 753 os.chdir(olddir)
725 754
726 755 lines = [line.rstrip() for line in text.rstrip().splitlines()]
727 756 while lines and not lines[0]:
728 757 del lines[0]
729 758 if not lines:
730 759 return None
731 760 text = '\n'.join(lines)
732 761 if branchname:
733 762 extra["branch"] = branchname
734 763 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
735 764 user, date, extra)
736 765 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
737 766 parent2=xp2)
738 767 tr.close()
739 768
740 769 if use_dirstate or update_dirstate:
741 770 self.dirstate.setparents(n)
742 771 if use_dirstate:
743 772 self.dirstate.update(new, "n")
744 773 self.dirstate.forget(remove)
745 774
746 775 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
747 776 return n
748 777
749 778 def walk(self, node=None, files=[], match=util.always, badmatch=None):
750 779 '''
751 780 walk recursively through the directory tree or a given
752 781 changeset, finding all files matched by the match
753 782 function
754 783
755 784 results are yielded in a tuple (src, filename), where src
756 785 is one of:
757 786 'f' the file was found in the directory tree
758 787 'm' the file was only in the dirstate and not in the tree
759 788 'b' file was not found and matched badmatch
760 789 '''
761 790
762 791 if node:
763 792 fdict = dict.fromkeys(files)
764 793 for fn in self.manifest.read(self.changelog.read(node)[0]):
765 794 for ffn in fdict:
766 795 # match if the file is the exact name or a directory
767 796 if ffn == fn or fn.startswith("%s/" % ffn):
768 797 del fdict[ffn]
769 798 break
770 799 if match(fn):
771 800 yield 'm', fn
772 801 for fn in fdict:
773 802 if badmatch and badmatch(fn):
774 803 if match(fn):
775 804 yield 'b', fn
776 805 else:
777 806 self.ui.warn(_('%s: No such file in rev %s\n') % (
778 807 util.pathto(self.getcwd(), fn), short(node)))
779 808 else:
780 809 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
781 810 yield src, fn
782 811
783 812 def status(self, node1=None, node2=None, files=[], match=util.always,
784 813 wlock=None, list_ignored=False, list_clean=False):
785 814 """return status of files between two nodes or node and working directory
786 815
787 816 If node1 is None, use the first dirstate parent instead.
788 817 If node2 is None, compare node1 with working directory.
789 818 """
790 819
791 820 def fcmp(fn, mf):
792 821 t1 = self.wread(fn)
793 822 return self.file(fn).cmp(mf.get(fn, nullid), t1)
794 823
795 824 def mfmatches(node):
796 825 change = self.changelog.read(node)
797 826 mf = self.manifest.read(change[0]).copy()
798 827 for fn in mf.keys():
799 828 if not match(fn):
800 829 del mf[fn]
801 830 return mf
802 831
803 832 modified, added, removed, deleted, unknown = [], [], [], [], []
804 833 ignored, clean = [], []
805 834
806 835 compareworking = False
807 836 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
808 837 compareworking = True
809 838
810 839 if not compareworking:
811 840 # read the manifest from node1 before the manifest from node2,
812 841 # so that we'll hit the manifest cache if we're going through
813 842 # all the revisions in parent->child order.
814 843 mf1 = mfmatches(node1)
815 844
816 845 # are we comparing the working directory?
817 846 if not node2:
818 847 if not wlock:
819 848 try:
820 849 wlock = self.wlock(wait=0)
821 850 except lock.LockException:
822 851 wlock = None
823 852 (lookup, modified, added, removed, deleted, unknown,
824 853 ignored, clean) = self.dirstate.status(files, match,
825 854 list_ignored, list_clean)
826 855
827 856 # are we comparing working dir against its parent?
828 857 if compareworking:
829 858 if lookup:
830 859 # do a full compare of any files that might have changed
831 860 mf2 = mfmatches(self.dirstate.parents()[0])
832 861 for f in lookup:
833 862 if fcmp(f, mf2):
834 863 modified.append(f)
835 864 else:
836 865 clean.append(f)
837 866 if wlock is not None:
838 867 self.dirstate.update([f], "n")
839 868 else:
840 869 # we are comparing working dir against non-parent
841 870 # generate a pseudo-manifest for the working dir
842 871 # XXX: create it in dirstate.py ?
843 872 mf2 = mfmatches(self.dirstate.parents()[0])
844 873 for f in lookup + modified + added:
845 874 mf2[f] = ""
846 875 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
847 876 for f in removed:
848 877 if f in mf2:
849 878 del mf2[f]
850 879 else:
851 880 # we are comparing two revisions
852 881 mf2 = mfmatches(node2)
853 882
854 883 if not compareworking:
855 884 # flush lists from dirstate before comparing manifests
856 885 modified, added, clean = [], [], []
857 886
858 887 # make sure to sort the files so we talk to the disk in a
859 888 # reasonable order
860 889 mf2keys = mf2.keys()
861 890 mf2keys.sort()
862 891 for fn in mf2keys:
863 892 if mf1.has_key(fn):
864 893 if mf1.flags(fn) != mf2.flags(fn) or \
865 894 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
866 895 modified.append(fn)
867 896 elif list_clean:
868 897 clean.append(fn)
869 898 del mf1[fn]
870 899 else:
871 900 added.append(fn)
872 901
873 902 removed = mf1.keys()
874 903
875 904 # sort and return results:
876 905 for l in modified, added, removed, deleted, unknown, ignored, clean:
877 906 l.sort()
878 907 return (modified, added, removed, deleted, unknown, ignored, clean)
879 908
880 909 def add(self, list, wlock=None):
881 910 if not wlock:
882 911 wlock = self.wlock()
883 912 for f in list:
884 913 p = self.wjoin(f)
885 914 if not os.path.exists(p):
886 915 self.ui.warn(_("%s does not exist!\n") % f)
887 916 elif not os.path.isfile(p):
888 917 self.ui.warn(_("%s not added: only files supported currently\n")
889 918 % f)
890 919 elif self.dirstate.state(f) in 'an':
891 920 self.ui.warn(_("%s already tracked!\n") % f)
892 921 else:
893 922 self.dirstate.update([f], "a")
894 923
895 924 def forget(self, list, wlock=None):
896 925 if not wlock:
897 926 wlock = self.wlock()
898 927 for f in list:
899 928 if self.dirstate.state(f) not in 'ai':
900 929 self.ui.warn(_("%s not added!\n") % f)
901 930 else:
902 931 self.dirstate.forget([f])
903 932
904 933 def remove(self, list, unlink=False, wlock=None):
905 934 if unlink:
906 935 for f in list:
907 936 try:
908 937 util.unlink(self.wjoin(f))
909 938 except OSError, inst:
910 939 if inst.errno != errno.ENOENT:
911 940 raise
912 941 if not wlock:
913 942 wlock = self.wlock()
914 943 for f in list:
915 944 p = self.wjoin(f)
916 945 if os.path.exists(p):
917 946 self.ui.warn(_("%s still exists!\n") % f)
918 947 elif self.dirstate.state(f) == 'a':
919 948 self.dirstate.forget([f])
920 949 elif f not in self.dirstate:
921 950 self.ui.warn(_("%s not tracked!\n") % f)
922 951 else:
923 952 self.dirstate.update([f], "r")
924 953
925 954 def undelete(self, list, wlock=None):
926 955 p = self.dirstate.parents()[0]
927 956 mn = self.changelog.read(p)[0]
928 957 m = self.manifest.read(mn)
929 958 if not wlock:
930 959 wlock = self.wlock()
931 960 for f in list:
932 961 if self.dirstate.state(f) not in "r":
933 962 self.ui.warn("%s not removed!\n" % f)
934 963 else:
935 964 t = self.file(f).read(m[f])
936 965 self.wwrite(f, t)
937 966 util.set_exec(self.wjoin(f), m.execf(f))
938 967 self.dirstate.update([f], "n")
939 968
940 969 def copy(self, source, dest, wlock=None):
941 970 p = self.wjoin(dest)
942 971 if not os.path.exists(p):
943 972 self.ui.warn(_("%s does not exist!\n") % dest)
944 973 elif not os.path.isfile(p):
945 974 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
946 975 else:
947 976 if not wlock:
948 977 wlock = self.wlock()
949 978 if self.dirstate.state(dest) == '?':
950 979 self.dirstate.update([dest], "a")
951 980 self.dirstate.copy(source, dest)
952 981
953 982 def heads(self, start=None):
954 983 heads = self.changelog.heads(start)
955 984 # sort the output in rev descending order
956 985 heads = [(-self.changelog.rev(h), h) for h in heads]
957 986 heads.sort()
958 987 return [n for (r, n) in heads]
959 988
960 989 # branchlookup returns a dict giving a list of branches for
961 990 # each head. A branch is defined as the tag of a node or
962 991 # the branch of the node's parents. If a node has multiple
963 992 # branch tags, tags are eliminated if they are visible from other
964 993 # branch tags.
965 994 #
966 995 # So, for this graph: a->b->c->d->e
967 996 # \ /
968 997 # aa -----/
969 998 # a has tag 2.6.12
970 999 # d has tag 2.6.13
971 1000 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
972 1001 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
973 1002 # from the list.
974 1003 #
975 1004 # It is possible that more than one head will have the same branch tag.
976 1005 # callers need to check the result for multiple heads under the same
977 1006 # branch tag if that is a problem for them (ie checkout of a specific
978 1007 # branch).
979 1008 #
980 1009 # passing in a specific branch will limit the depth of the search
981 1010 # through the parents. It won't limit the branches returned in the
982 1011 # result though.
983 1012 def branchlookup(self, heads=None, branch=None):
984 1013 if not heads:
985 1014 heads = self.heads()
986 1015 headt = [ h for h in heads ]
987 1016 chlog = self.changelog
988 1017 branches = {}
989 1018 merges = []
990 1019 seenmerge = {}
991 1020
992 1021 # traverse the tree once for each head, recording in the branches
993 1022 # dict which tags are visible from this head. The branches
994 1023 # dict also records which tags are visible from each tag
995 1024 # while we traverse.
996 1025 while headt or merges:
997 1026 if merges:
998 1027 n, found = merges.pop()
999 1028 visit = [n]
1000 1029 else:
1001 1030 h = headt.pop()
1002 1031 visit = [h]
1003 1032 found = [h]
1004 1033 seen = {}
1005 1034 while visit:
1006 1035 n = visit.pop()
1007 1036 if n in seen:
1008 1037 continue
1009 1038 pp = chlog.parents(n)
1010 1039 tags = self.nodetags(n)
1011 1040 if tags:
1012 1041 for x in tags:
1013 1042 if x == 'tip':
1014 1043 continue
1015 1044 for f in found:
1016 1045 branches.setdefault(f, {})[n] = 1
1017 1046 branches.setdefault(n, {})[n] = 1
1018 1047 break
1019 1048 if n not in found:
1020 1049 found.append(n)
1021 1050 if branch in tags:
1022 1051 continue
1023 1052 seen[n] = 1
1024 1053 if pp[1] != nullid and n not in seenmerge:
1025 1054 merges.append((pp[1], [x for x in found]))
1026 1055 seenmerge[n] = 1
1027 1056 if pp[0] != nullid:
1028 1057 visit.append(pp[0])
1029 1058 # traverse the branches dict, eliminating branch tags from each
1030 1059 # head that are visible from another branch tag for that head.
1031 1060 out = {}
1032 1061 viscache = {}
1033 1062 for h in heads:
1034 1063 def visible(node):
1035 1064 if node in viscache:
1036 1065 return viscache[node]
1037 1066 ret = {}
1038 1067 visit = [node]
1039 1068 while visit:
1040 1069 x = visit.pop()
1041 1070 if x in viscache:
1042 1071 ret.update(viscache[x])
1043 1072 elif x not in ret:
1044 1073 ret[x] = 1
1045 1074 if x in branches:
1046 1075 visit[len(visit):] = branches[x].keys()
1047 1076 viscache[node] = ret
1048 1077 return ret
1049 1078 if h not in branches:
1050 1079 continue
1051 1080 # O(n^2), but somewhat limited. This only searches the
1052 1081 # tags visible from a specific head, not all the tags in the
1053 1082 # whole repo.
1054 1083 for b in branches[h]:
1055 1084 vis = False
1056 1085 for bb in branches[h].keys():
1057 1086 if b != bb:
1058 1087 if b in visible(bb):
1059 1088 vis = True
1060 1089 break
1061 1090 if not vis:
1062 1091 l = out.setdefault(h, [])
1063 1092 l[len(l):] = self.nodetags(b)
1064 1093 return out
1065 1094
1066 1095 def branches(self, nodes):
1067 1096 if not nodes:
1068 1097 nodes = [self.changelog.tip()]
1069 1098 b = []
1070 1099 for n in nodes:
1071 1100 t = n
1072 1101 while 1:
1073 1102 p = self.changelog.parents(n)
1074 1103 if p[1] != nullid or p[0] == nullid:
1075 1104 b.append((t, n, p[0], p[1]))
1076 1105 break
1077 1106 n = p[0]
1078 1107 return b
1079 1108
1080 1109 def between(self, pairs):
1081 1110 r = []
1082 1111
1083 1112 for top, bottom in pairs:
1084 1113 n, l, i = top, [], 0
1085 1114 f = 1
1086 1115
1087 1116 while n != bottom:
1088 1117 p = self.changelog.parents(n)[0]
1089 1118 if i == f:
1090 1119 l.append(n)
1091 1120 f = f * 2
1092 1121 n = p
1093 1122 i += 1
1094 1123
1095 1124 r.append(l)
1096 1125
1097 1126 return r
1098 1127
1099 1128 def findincoming(self, remote, base=None, heads=None, force=False):
1100 1129 """Return list of roots of the subsets of missing nodes from remote
1101 1130
1102 1131 If base dict is specified, assume that these nodes and their parents
1103 1132 exist on the remote side and that no child of a node of base exists
1104 1133 in both remote and self.
1105 1134 Furthermore base will be updated to include the nodes that exists
1106 1135 in self and remote but no children exists in self and remote.
1107 1136 If a list of heads is specified, return only nodes which are heads
1108 1137 or ancestors of these heads.
1109 1138
1110 1139 All the ancestors of base are in self and in remote.
1111 1140 All the descendants of the list returned are missing in self.
1112 1141 (and so we know that the rest of the nodes are missing in remote, see
1113 1142 outgoing)
1114 1143 """
1115 1144 m = self.changelog.nodemap
1116 1145 search = []
1117 1146 fetch = {}
1118 1147 seen = {}
1119 1148 seenbranch = {}
1120 1149 if base == None:
1121 1150 base = {}
1122 1151
1123 1152 if not heads:
1124 1153 heads = remote.heads()
1125 1154
1126 1155 if self.changelog.tip() == nullid:
1127 1156 base[nullid] = 1
1128 1157 if heads != [nullid]:
1129 1158 return [nullid]
1130 1159 return []
1131 1160
1132 1161 # assume we're closer to the tip than the root
1133 1162 # and start by examining the heads
1134 1163 self.ui.status(_("searching for changes\n"))
1135 1164
1136 1165 unknown = []
1137 1166 for h in heads:
1138 1167 if h not in m:
1139 1168 unknown.append(h)
1140 1169 else:
1141 1170 base[h] = 1
1142 1171
1143 1172 if not unknown:
1144 1173 return []
1145 1174
1146 1175 req = dict.fromkeys(unknown)
1147 1176 reqcnt = 0
1148 1177
1149 1178 # search through remote branches
1150 1179 # a 'branch' here is a linear segment of history, with four parts:
1151 1180 # head, root, first parent, second parent
1152 1181 # (a branch always has two parents (or none) by definition)
1153 1182 unknown = remote.branches(unknown)
1154 1183 while unknown:
1155 1184 r = []
1156 1185 while unknown:
1157 1186 n = unknown.pop(0)
1158 1187 if n[0] in seen:
1159 1188 continue
1160 1189
1161 1190 self.ui.debug(_("examining %s:%s\n")
1162 1191 % (short(n[0]), short(n[1])))
1163 1192 if n[0] == nullid: # found the end of the branch
1164 1193 pass
1165 1194 elif n in seenbranch:
1166 1195 self.ui.debug(_("branch already found\n"))
1167 1196 continue
1168 1197 elif n[1] and n[1] in m: # do we know the base?
1169 1198 self.ui.debug(_("found incomplete branch %s:%s\n")
1170 1199 % (short(n[0]), short(n[1])))
1171 1200 search.append(n) # schedule branch range for scanning
1172 1201 seenbranch[n] = 1
1173 1202 else:
1174 1203 if n[1] not in seen and n[1] not in fetch:
1175 1204 if n[2] in m and n[3] in m:
1176 1205 self.ui.debug(_("found new changeset %s\n") %
1177 1206 short(n[1]))
1178 1207 fetch[n[1]] = 1 # earliest unknown
1179 1208 for p in n[2:4]:
1180 1209 if p in m:
1181 1210 base[p] = 1 # latest known
1182 1211
1183 1212 for p in n[2:4]:
1184 1213 if p not in req and p not in m:
1185 1214 r.append(p)
1186 1215 req[p] = 1
1187 1216 seen[n[0]] = 1
1188 1217
1189 1218 if r:
1190 1219 reqcnt += 1
1191 1220 self.ui.debug(_("request %d: %s\n") %
1192 1221 (reqcnt, " ".join(map(short, r))))
1193 1222 for p in xrange(0, len(r), 10):
1194 1223 for b in remote.branches(r[p:p+10]):
1195 1224 self.ui.debug(_("received %s:%s\n") %
1196 1225 (short(b[0]), short(b[1])))
1197 1226 unknown.append(b)
1198 1227
1199 1228 # do binary search on the branches we found
1200 1229 while search:
1201 1230 n = search.pop(0)
1202 1231 reqcnt += 1
1203 1232 l = remote.between([(n[0], n[1])])[0]
1204 1233 l.append(n[1])
1205 1234 p = n[0]
1206 1235 f = 1
1207 1236 for i in l:
1208 1237 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1209 1238 if i in m:
1210 1239 if f <= 2:
1211 1240 self.ui.debug(_("found new branch changeset %s\n") %
1212 1241 short(p))
1213 1242 fetch[p] = 1
1214 1243 base[i] = 1
1215 1244 else:
1216 1245 self.ui.debug(_("narrowed branch search to %s:%s\n")
1217 1246 % (short(p), short(i)))
1218 1247 search.append((p, i))
1219 1248 break
1220 1249 p, f = i, f * 2
1221 1250
1222 1251 # sanity check our fetch list
1223 1252 for f in fetch.keys():
1224 1253 if f in m:
1225 1254 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1226 1255
1227 1256 if base.keys() == [nullid]:
1228 1257 if force:
1229 1258 self.ui.warn(_("warning: repository is unrelated\n"))
1230 1259 else:
1231 1260 raise util.Abort(_("repository is unrelated"))
1232 1261
1233 1262 self.ui.debug(_("found new changesets starting at ") +
1234 1263 " ".join([short(f) for f in fetch]) + "\n")
1235 1264
1236 1265 self.ui.debug(_("%d total queries\n") % reqcnt)
1237 1266
1238 1267 return fetch.keys()
1239 1268
1240 1269 def findoutgoing(self, remote, base=None, heads=None, force=False):
1241 1270 """Return list of nodes that are roots of subsets not in remote
1242 1271
1243 1272 If base dict is specified, assume that these nodes and their parents
1244 1273 exist on the remote side.
1245 1274 If a list of heads is specified, return only nodes which are heads
1246 1275 or ancestors of these heads, and return a second element which
1247 1276 contains all remote heads which get new children.
1248 1277 """
1249 1278 if base == None:
1250 1279 base = {}
1251 1280 self.findincoming(remote, base, heads, force=force)
1252 1281
1253 1282 self.ui.debug(_("common changesets up to ")
1254 1283 + " ".join(map(short, base.keys())) + "\n")
1255 1284
1256 1285 remain = dict.fromkeys(self.changelog.nodemap)
1257 1286
1258 1287 # prune everything remote has from the tree
1259 1288 del remain[nullid]
1260 1289 remove = base.keys()
1261 1290 while remove:
1262 1291 n = remove.pop(0)
1263 1292 if n in remain:
1264 1293 del remain[n]
1265 1294 for p in self.changelog.parents(n):
1266 1295 remove.append(p)
1267 1296
1268 1297 # find every node whose parents have been pruned
1269 1298 subset = []
1270 1299 # find every remote head that will get new children
1271 1300 updated_heads = {}
1272 1301 for n in remain:
1273 1302 p1, p2 = self.changelog.parents(n)
1274 1303 if p1 not in remain and p2 not in remain:
1275 1304 subset.append(n)
1276 1305 if heads:
1277 1306 if p1 in heads:
1278 1307 updated_heads[p1] = True
1279 1308 if p2 in heads:
1280 1309 updated_heads[p2] = True
1281 1310
1282 1311 # this is the set of all roots we have to push
1283 1312 if heads:
1284 1313 return subset, updated_heads.keys()
1285 1314 else:
1286 1315 return subset
1287 1316
1288 1317 def pull(self, remote, heads=None, force=False, lock=None):
1289 1318 mylock = False
1290 1319 if not lock:
1291 1320 lock = self.lock()
1292 1321 mylock = True
1293 1322
1294 1323 try:
1295 1324 fetch = self.findincoming(remote, force=force)
1296 1325 if fetch == [nullid]:
1297 1326 self.ui.status(_("requesting all changes\n"))
1298 1327
1299 1328 if not fetch:
1300 1329 self.ui.status(_("no changes found\n"))
1301 1330 return 0
1302 1331
1303 1332 if heads is None:
1304 1333 cg = remote.changegroup(fetch, 'pull')
1305 1334 else:
1306 1335 if 'changegroupsubset' not in remote.capabilities:
1307 1336 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1308 1337 cg = remote.changegroupsubset(fetch, heads, 'pull')
1309 1338 return self.addchangegroup(cg, 'pull', remote.url())
1310 1339 finally:
1311 1340 if mylock:
1312 1341 lock.release()
1313 1342
1314 1343 def push(self, remote, force=False, revs=None):
1315 1344 # there are two ways to push to remote repo:
1316 1345 #
1317 1346 # addchangegroup assumes local user can lock remote
1318 1347 # repo (local filesystem, old ssh servers).
1319 1348 #
1320 1349 # unbundle assumes local user cannot lock remote repo (new ssh
1321 1350 # servers, http servers).
1322 1351
1323 1352 if remote.capable('unbundle'):
1324 1353 return self.push_unbundle(remote, force, revs)
1325 1354 return self.push_addchangegroup(remote, force, revs)
1326 1355
1327 1356 def prepush(self, remote, force, revs):
1328 1357 base = {}
1329 1358 remote_heads = remote.heads()
1330 1359 inc = self.findincoming(remote, base, remote_heads, force=force)
1331 1360
1332 1361 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1333 1362 if revs is not None:
1334 1363 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1335 1364 else:
1336 1365 bases, heads = update, self.changelog.heads()
1337 1366
1338 1367 if not bases:
1339 1368 self.ui.status(_("no changes found\n"))
1340 1369 return None, 1
1341 1370 elif not force:
1342 1371 # check if we're creating new remote heads
1343 1372 # to be a remote head after push, node must be either
1344 1373 # - unknown locally
1345 1374 # - a local outgoing head descended from update
1346 1375 # - a remote head that's known locally and not
1347 1376 # ancestral to an outgoing head
1348 1377
1349 1378 warn = 0
1350 1379
1351 1380 if remote_heads == [nullid]:
1352 1381 warn = 0
1353 1382 elif not revs and len(heads) > len(remote_heads):
1354 1383 warn = 1
1355 1384 else:
1356 1385 newheads = list(heads)
1357 1386 for r in remote_heads:
1358 1387 if r in self.changelog.nodemap:
1359 1388 desc = self.changelog.heads(r)
1360 1389 l = [h for h in heads if h in desc]
1361 1390 if not l:
1362 1391 newheads.append(r)
1363 1392 else:
1364 1393 newheads.append(r)
1365 1394 if len(newheads) > len(remote_heads):
1366 1395 warn = 1
1367 1396
1368 1397 if warn:
1369 1398 self.ui.warn(_("abort: push creates new remote heads!\n"))
1370 1399 self.ui.status(_("(did you forget to merge?"
1371 1400 " use push -f to force)\n"))
1372 1401 return None, 1
1373 1402 elif inc:
1374 1403 self.ui.warn(_("note: unsynced remote changes!\n"))
1375 1404
1376 1405
1377 1406 if revs is None:
1378 1407 cg = self.changegroup(update, 'push')
1379 1408 else:
1380 1409 cg = self.changegroupsubset(update, revs, 'push')
1381 1410 return cg, remote_heads
1382 1411
1383 1412 def push_addchangegroup(self, remote, force, revs):
1384 1413 lock = remote.lock()
1385 1414
1386 1415 ret = self.prepush(remote, force, revs)
1387 1416 if ret[0] is not None:
1388 1417 cg, remote_heads = ret
1389 1418 return remote.addchangegroup(cg, 'push', self.url())
1390 1419 return ret[1]
1391 1420
1392 1421 def push_unbundle(self, remote, force, revs):
1393 1422 # local repo finds heads on server, finds out what revs it
1394 1423 # must push. once revs transferred, if server finds it has
1395 1424 # different heads (someone else won commit/push race), server
1396 1425 # aborts.
1397 1426
1398 1427 ret = self.prepush(remote, force, revs)
1399 1428 if ret[0] is not None:
1400 1429 cg, remote_heads = ret
1401 1430 if force: remote_heads = ['force']
1402 1431 return remote.unbundle(cg, remote_heads, 'push')
1403 1432 return ret[1]
1404 1433
1405 1434 def changegroupinfo(self, nodes):
1406 1435 self.ui.note(_("%d changesets found\n") % len(nodes))
1407 1436 if self.ui.debugflag:
1408 1437 self.ui.debug(_("List of changesets:\n"))
1409 1438 for node in nodes:
1410 1439 self.ui.debug("%s\n" % hex(node))
1411 1440
1412 1441 def changegroupsubset(self, bases, heads, source):
1413 1442 """This function generates a changegroup consisting of all the nodes
1414 1443 that are descendants of any of the bases, and ancestors of any of
1415 1444 the heads.
1416 1445
1417 1446 It is fairly complex as determining which filenodes and which
1418 1447 manifest nodes need to be included for the changeset to be complete
1419 1448 is non-trivial.
1420 1449
1421 1450 Another wrinkle is doing the reverse, figuring out which changeset in
1422 1451 the changegroup a particular filenode or manifestnode belongs to."""
1423 1452
1424 1453 self.hook('preoutgoing', throw=True, source=source)
1425 1454
1426 1455 # Set up some initial variables
1427 1456 # Make it easy to refer to self.changelog
1428 1457 cl = self.changelog
1429 1458 # msng is short for missing - compute the list of changesets in this
1430 1459 # changegroup.
1431 1460 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1432 1461 self.changegroupinfo(msng_cl_lst)
1433 1462 # Some bases may turn out to be superfluous, and some heads may be
1434 1463 # too. nodesbetween will return the minimal set of bases and heads
1435 1464 # necessary to re-create the changegroup.
1436 1465
1437 1466 # Known heads are the list of heads that it is assumed the recipient
1438 1467 # of this changegroup will know about.
1439 1468 knownheads = {}
1440 1469 # We assume that all parents of bases are known heads.
1441 1470 for n in bases:
1442 1471 for p in cl.parents(n):
1443 1472 if p != nullid:
1444 1473 knownheads[p] = 1
1445 1474 knownheads = knownheads.keys()
1446 1475 if knownheads:
1447 1476 # Now that we know what heads are known, we can compute which
1448 1477 # changesets are known. The recipient must know about all
1449 1478 # changesets required to reach the known heads from the null
1450 1479 # changeset.
1451 1480 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1452 1481 junk = None
1453 1482 # Transform the list into an ersatz set.
1454 1483 has_cl_set = dict.fromkeys(has_cl_set)
1455 1484 else:
1456 1485 # If there were no known heads, the recipient cannot be assumed to
1457 1486 # know about any changesets.
1458 1487 has_cl_set = {}
1459 1488
1460 1489 # Make it easy to refer to self.manifest
1461 1490 mnfst = self.manifest
1462 1491 # We don't know which manifests are missing yet
1463 1492 msng_mnfst_set = {}
1464 1493 # Nor do we know which filenodes are missing.
1465 1494 msng_filenode_set = {}
1466 1495
1467 1496 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1468 1497 junk = None
1469 1498
1470 1499 # A changeset always belongs to itself, so the changenode lookup
1471 1500 # function for a changenode is identity.
1472 1501 def identity(x):
1473 1502 return x
1474 1503
1475 1504 # A function generating function. Sets up an environment for the
1476 1505 # inner function.
1477 1506 def cmp_by_rev_func(revlog):
1478 1507 # Compare two nodes by their revision number in the environment's
1479 1508 # revision history. Since the revision number both represents the
1480 1509 # most efficient order to read the nodes in, and represents a
1481 1510 # topological sorting of the nodes, this function is often useful.
1482 1511 def cmp_by_rev(a, b):
1483 1512 return cmp(revlog.rev(a), revlog.rev(b))
1484 1513 return cmp_by_rev
1485 1514
1486 1515 # If we determine that a particular file or manifest node must be a
1487 1516 # node that the recipient of the changegroup will already have, we can
1488 1517 # also assume the recipient will have all the parents. This function
1489 1518 # prunes them from the set of missing nodes.
1490 1519 def prune_parents(revlog, hasset, msngset):
1491 1520 haslst = hasset.keys()
1492 1521 haslst.sort(cmp_by_rev_func(revlog))
1493 1522 for node in haslst:
1494 1523 parentlst = [p for p in revlog.parents(node) if p != nullid]
1495 1524 while parentlst:
1496 1525 n = parentlst.pop()
1497 1526 if n not in hasset:
1498 1527 hasset[n] = 1
1499 1528 p = [p for p in revlog.parents(n) if p != nullid]
1500 1529 parentlst.extend(p)
1501 1530 for n in hasset:
1502 1531 msngset.pop(n, None)
1503 1532
1504 1533 # This is a function generating function used to set up an environment
1505 1534 # for the inner function to execute in.
1506 1535 def manifest_and_file_collector(changedfileset):
1507 1536 # This is an information gathering function that gathers
1508 1537 # information from each changeset node that goes out as part of
1509 1538 # the changegroup. The information gathered is a list of which
1510 1539 # manifest nodes are potentially required (the recipient may
1511 1540 # already have them) and total list of all files which were
1512 1541 # changed in any changeset in the changegroup.
1513 1542 #
1514 1543 # We also remember the first changenode we saw any manifest
1515 1544 # referenced by so we can later determine which changenode 'owns'
1516 1545 # the manifest.
1517 1546 def collect_manifests_and_files(clnode):
1518 1547 c = cl.read(clnode)
1519 1548 for f in c[3]:
1520 1549 # This is to make sure we only have one instance of each
1521 1550 # filename string for each filename.
1522 1551 changedfileset.setdefault(f, f)
1523 1552 msng_mnfst_set.setdefault(c[0], clnode)
1524 1553 return collect_manifests_and_files
1525 1554
1526 1555 # Figure out which manifest nodes (of the ones we think might be part
1527 1556 # of the changegroup) the recipient must know about and remove them
1528 1557 # from the changegroup.
1529 1558 def prune_manifests():
1530 1559 has_mnfst_set = {}
1531 1560 for n in msng_mnfst_set:
1532 1561 # If a 'missing' manifest thinks it belongs to a changenode
1533 1562 # the recipient is assumed to have, obviously the recipient
1534 1563 # must have that manifest.
1535 1564 linknode = cl.node(mnfst.linkrev(n))
1536 1565 if linknode in has_cl_set:
1537 1566 has_mnfst_set[n] = 1
1538 1567 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1539 1568
1540 1569 # Use the information collected in collect_manifests_and_files to say
1541 1570 # which changenode any manifestnode belongs to.
1542 1571 def lookup_manifest_link(mnfstnode):
1543 1572 return msng_mnfst_set[mnfstnode]
1544 1573
1545 1574 # A function generating function that sets up the initial environment
1546 1575 # for the inner function.
1547 1576 def filenode_collector(changedfiles):
1548 1577 next_rev = [0]
1549 1578 # This gathers information from each manifestnode included in the
1550 1579 # changegroup about which filenodes the manifest node references
1551 1580 # so we can include those in the changegroup too.
1552 1581 #
1553 1582 # It also remembers which changenode each filenode belongs to. It
1554 1583 # does this by assuming a filenode belongs to the changenode that
1555 1584 # the first manifest referencing it belongs to.
1556 1585 def collect_msng_filenodes(mnfstnode):
1557 1586 r = mnfst.rev(mnfstnode)
1558 1587 if r == next_rev[0]:
1559 1588 # If the last rev we looked at was the one just previous,
1560 1589 # we only need to see a diff.
1561 1590 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1562 1591 # For each line in the delta
1563 1592 for dline in delta.splitlines():
1564 1593 # get the filename and filenode for that line
1565 1594 f, fnode = dline.split('\0')
1566 1595 fnode = bin(fnode[:40])
1567 1596 f = changedfiles.get(f, None)
1568 1597 # And if the file is in the list of files we care
1569 1598 # about.
1570 1599 if f is not None:
1571 1600 # Get the changenode this manifest belongs to
1572 1601 clnode = msng_mnfst_set[mnfstnode]
1573 1602 # Create the set of filenodes for the file if
1574 1603 # there isn't one already.
1575 1604 ndset = msng_filenode_set.setdefault(f, {})
1576 1605 # And set the filenode's changelog node to the
1577 1606 # manifest's if it hasn't been set already.
1578 1607 ndset.setdefault(fnode, clnode)
1579 1608 else:
1580 1609 # Otherwise we need a full manifest.
1581 1610 m = mnfst.read(mnfstnode)
1582 1611 # For every file we care about.
1583 1612 for f in changedfiles:
1584 1613 fnode = m.get(f, None)
1585 1614 # If it's in the manifest
1586 1615 if fnode is not None:
1587 1616 # See comments above.
1588 1617 clnode = msng_mnfst_set[mnfstnode]
1589 1618 ndset = msng_filenode_set.setdefault(f, {})
1590 1619 ndset.setdefault(fnode, clnode)
1591 1620 # Remember the revision we hope to see next.
1592 1621 next_rev[0] = r + 1
1593 1622 return collect_msng_filenodes
1594 1623
1595 1624 # We have a list of filenodes we think we need for a file; let's remove
1596 1625 # all those we know the recipient must have.
1597 1626 def prune_filenodes(f, filerevlog):
1598 1627 msngset = msng_filenode_set[f]
1599 1628 hasset = {}
1600 1629 # If a 'missing' filenode thinks it belongs to a changenode we
1601 1630 # assume the recipient must have, then the recipient must have
1602 1631 # that filenode.
1603 1632 for n in msngset:
1604 1633 clnode = cl.node(filerevlog.linkrev(n))
1605 1634 if clnode in has_cl_set:
1606 1635 hasset[n] = 1
1607 1636 prune_parents(filerevlog, hasset, msngset)
1608 1637
1609 1638 # A function generating function that sets up a context for the
1610 1639 # inner function.
1611 1640 def lookup_filenode_link_func(fname):
1612 1641 msngset = msng_filenode_set[fname]
1613 1642 # Lookup the changenode the filenode belongs to.
1614 1643 def lookup_filenode_link(fnode):
1615 1644 return msngset[fnode]
1616 1645 return lookup_filenode_link
1617 1646
1618 1647 # Now that we have all these utility functions to help out and
1619 1648 # logically divide up the task, generate the group.
1620 1649 def gengroup():
1621 1650 # The set of changed files starts empty.
1622 1651 changedfiles = {}
1623 1652 # Create a changenode group generator that will call our functions
1624 1653 # back to lookup the owning changenode and collect information.
1625 1654 group = cl.group(msng_cl_lst, identity,
1626 1655 manifest_and_file_collector(changedfiles))
1627 1656 for chnk in group:
1628 1657 yield chnk
1629 1658
1630 1659 # The list of manifests has been collected by the generator
1631 1660 # calling our functions back.
1632 1661 prune_manifests()
1633 1662 msng_mnfst_lst = msng_mnfst_set.keys()
1634 1663 # Sort the manifestnodes by revision number.
1635 1664 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1636 1665 # Create a generator for the manifestnodes that calls our lookup
1637 1666 # and data collection functions back.
1638 1667 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1639 1668 filenode_collector(changedfiles))
1640 1669 for chnk in group:
1641 1670 yield chnk
1642 1671
1643 1672 # These are no longer needed, dereference and toss the memory for
1644 1673 # them.
1645 1674 msng_mnfst_lst = None
1646 1675 msng_mnfst_set.clear()
1647 1676
1648 1677 changedfiles = changedfiles.keys()
1649 1678 changedfiles.sort()
1650 1679 # Go through all our files in order sorted by name.
1651 1680 for fname in changedfiles:
1652 1681 filerevlog = self.file(fname)
1653 1682 # Toss out the filenodes that the recipient isn't really
1654 1683 # missing.
1655 1684 if msng_filenode_set.has_key(fname):
1656 1685 prune_filenodes(fname, filerevlog)
1657 1686 msng_filenode_lst = msng_filenode_set[fname].keys()
1658 1687 else:
1659 1688 msng_filenode_lst = []
1660 1689 # If any filenodes are left, generate the group for them,
1661 1690 # otherwise don't bother.
1662 1691 if len(msng_filenode_lst) > 0:
1663 1692 yield changegroup.genchunk(fname)
1664 1693 # Sort the filenodes by their revision #
1665 1694 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1666 1695 # Create a group generator and only pass in a changenode
1667 1696 # lookup function, as we don't need to collect any information
1668 1697 # from filenodes.
1669 1698 group = filerevlog.group(msng_filenode_lst,
1670 1699 lookup_filenode_link_func(fname))
1671 1700 for chnk in group:
1672 1701 yield chnk
1673 1702 if msng_filenode_set.has_key(fname):
1674 1703 # Don't need this anymore, toss it to free memory.
1675 1704 del msng_filenode_set[fname]
1676 1705 # Signal that no more groups are left.
1677 1706 yield changegroup.closechunk()
1678 1707
1679 1708 if msng_cl_lst:
1680 1709 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1681 1710
1682 1711 return util.chunkbuffer(gengroup())
1683 1712
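# a note on the chunk framing produced by gengroup above (as implemented
# by the changegroup module at this time): changegroup.genchunk prefixes
# each payload with a 4-byte big-endian length that counts the length
# field itself, and changegroup.closechunk emits a zero length
# ("\0\0\0\0") to mark the end of a group.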
1684 1713 def changegroup(self, basenodes, source):
1685 1714 """Generate a changegroup of all nodes that we have that a recipient
1686 1715 doesn't.
1687 1716
1688 1717 This is much easier than the previous function as we can assume that
1689 1718 the recipient has any changenode we aren't sending them."""
1690 1719
1691 1720 self.hook('preoutgoing', throw=True, source=source)
1692 1721
1693 1722 cl = self.changelog
1694 1723 nodes = cl.nodesbetween(basenodes, None)[0]
1695 1724 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1696 1725 self.changegroupinfo(nodes)
1697 1726
1698 1727 def identity(x):
1699 1728 return x
1700 1729
1701 1730 def gennodelst(revlog):
1702 1731 for r in xrange(0, revlog.count()):
1703 1732 n = revlog.node(r)
1704 1733 if revlog.linkrev(n) in revset:
1705 1734 yield n
1706 1735
1707 1736 def changed_file_collector(changedfileset):
1708 1737 def collect_changed_files(clnode):
1709 1738 c = cl.read(clnode)
1710 1739 for fname in c[3]:
1711 1740 changedfileset[fname] = 1
1712 1741 return collect_changed_files
1713 1742
1714 1743 def lookuprevlink_func(revlog):
1715 1744 def lookuprevlink(n):
1716 1745 return cl.node(revlog.linkrev(n))
1717 1746 return lookuprevlink
1718 1747
1719 1748 def gengroup():
1720 1749 # construct a list of all changed files
1721 1750 changedfiles = {}
1722 1751
1723 1752 for chnk in cl.group(nodes, identity,
1724 1753 changed_file_collector(changedfiles)):
1725 1754 yield chnk
1726 1755 changedfiles = changedfiles.keys()
1727 1756 changedfiles.sort()
1728 1757
1729 1758 mnfst = self.manifest
1730 1759 nodeiter = gennodelst(mnfst)
1731 1760 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1732 1761 yield chnk
1733 1762
1734 1763 for fname in changedfiles:
1735 1764 filerevlog = self.file(fname)
1736 1765 nodeiter = gennodelst(filerevlog)
1737 1766 nodeiter = list(nodeiter)
1738 1767 if nodeiter:
1739 1768 yield changegroup.genchunk(fname)
1740 1769 lookup = lookuprevlink_func(filerevlog)
1741 1770 for chnk in filerevlog.group(nodeiter, lookup):
1742 1771 yield chnk
1743 1772
1744 1773 yield changegroup.closechunk()
1745 1774
1746 1775 if nodes:
1747 1776 self.hook('outgoing', node=hex(nodes[0]), source=source)
1748 1777
1749 1778 return util.chunkbuffer(gengroup())
1750 1779
1751 1780 def addchangegroup(self, source, srctype, url):
1752 1781 """add changegroup to repo.
1753 1782
1754 1783 return values:
1755 1784 - nothing changed or no source: 0
1756 1785 - more heads than before: 1+added heads (2..n)
1757 1786 - fewer heads than before: -1-removed heads (-2..-n)
1758 1787 - number of heads stays the same: 1
1759 1788 """
1760 1789 def csmap(x):
1761 1790 self.ui.debug(_("add changeset %s\n") % short(x))
1762 1791 return cl.count()
1763 1792
1764 1793 def revmap(x):
1765 1794 return cl.rev(x)
1766 1795
1767 1796 if not source:
1768 1797 return 0
1769 1798
1770 1799 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1771 1800
1772 1801 changesets = files = revisions = 0
1773 1802
1774 1803 tr = self.transaction()
1775 1804
1776 1805 # write changelog data to temp files so concurrent readers will not
1777 1806 # see an inconsistent view
1778 1807 cl = None
1779 1808 try:
1780 1809 cl = appendfile.appendchangelog(self.sopener,
1781 1810 self.changelog.version)
1782 1811
1783 1812 oldheads = len(cl.heads())
1784 1813
1785 1814 # pull off the changeset group
1786 1815 self.ui.status(_("adding changesets\n"))
1787 1816 cor = cl.count() - 1
1788 1817 chunkiter = changegroup.chunkiter(source)
1789 1818 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1790 1819 raise util.Abort(_("received changelog group is empty"))
1791 1820 cnr = cl.count() - 1
1792 1821 changesets = cnr - cor
1793 1822
1794 1823 # pull off the manifest group
1795 1824 self.ui.status(_("adding manifests\n"))
1796 1825 chunkiter = changegroup.chunkiter(source)
1797 1826 # no need to check for empty manifest group here:
1798 1827 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1799 1828 # no new manifest will be created and the manifest group will
1800 1829 # be empty during the pull
1801 1830 self.manifest.addgroup(chunkiter, revmap, tr)
1802 1831
1803 1832 # process the files
1804 1833 self.ui.status(_("adding file changes\n"))
1805 1834 while 1:
1806 1835 f = changegroup.getchunk(source)
1807 1836 if not f:
1808 1837 break
1809 1838 self.ui.debug(_("adding %s revisions\n") % f)
1810 1839 fl = self.file(f)
1811 1840 o = fl.count()
1812 1841 chunkiter = changegroup.chunkiter(source)
1813 1842 if fl.addgroup(chunkiter, revmap, tr) is None:
1814 1843 raise util.Abort(_("received file revlog group is empty"))
1815 1844 revisions += fl.count() - o
1816 1845 files += 1
1817 1846
1818 1847 cl.writedata()
1819 1848 finally:
1820 1849 if cl:
1821 1850 cl.cleanup()
1822 1851
1823 1852 # make changelog see real files again
1824 1853 self.changelog = changelog.changelog(self.sopener,
1825 1854 self.changelog.version)
1826 1855 self.changelog.checkinlinesize(tr)
1827 1856
1828 1857 newheads = len(self.changelog.heads())
1829 1858 heads = ""
1830 1859 if oldheads and newheads != oldheads:
1831 1860 heads = _(" (%+d heads)") % (newheads - oldheads)
1832 1861
1833 1862 self.ui.status(_("added %d changesets"
1834 1863 " with %d changes to %d files%s\n")
1835 1864 % (changesets, revisions, files, heads))
1836 1865
1837 1866 if changesets > 0:
1838 1867 self.hook('pretxnchangegroup', throw=True,
1839 1868 node=hex(self.changelog.node(cor+1)), source=srctype,
1840 1869 url=url)
1841 1870
1842 1871 tr.close()
1843 1872
1844 1873 if changesets > 0:
1845 1874 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1846 1875 source=srctype, url=url)
1847 1876
1848 1877 for i in xrange(cor + 1, cnr + 1):
1849 1878 self.hook("incoming", node=hex(self.changelog.node(i)),
1850 1879 source=srctype, url=url)
1851 1880
1852 1881 # never return 0 here:
1853 1882 if newheads < oldheads:
1854 1883 return newheads - oldheads - 1
1855 1884 else:
1856 1885 return newheads - oldheads + 1
1857 1886
1858 1887
1859 1888 def stream_in(self, remote):
1860 1889 fp = remote.stream_out()
1861 1890 l = fp.readline()
1862 1891 try:
1863 1892 resp = int(l)
1864 1893 except ValueError:
1865 1894 raise util.UnexpectedOutput(
1866 1895 _('Unexpected response from remote server:'), l)
1867 1896 if resp == 1:
1868 1897 raise util.Abort(_('operation forbidden by server'))
1869 1898 elif resp == 2:
1870 1899 raise util.Abort(_('locking the remote repository failed'))
1871 1900 elif resp != 0:
1872 1901 raise util.Abort(_('the server sent an unknown error code'))
1873 1902 self.ui.status(_('streaming all changes\n'))
1874 1903 l = fp.readline()
1875 1904 try:
1876 1905 total_files, total_bytes = map(int, l.split(' ', 1))
1877 1906 except (ValueError, TypeError):
1878 1907 raise util.UnexpectedOutput(
1879 1908 _('Unexpected response from remote server:'), l)
1880 1909 self.ui.status(_('%d files to transfer, %s of data\n') %
1881 1910 (total_files, util.bytecount(total_bytes)))
1882 1911 start = time.time()
1883 1912 for i in xrange(total_files):
1884 1913 # XXX doesn't support '\n' or '\r' in filenames
1885 1914 l = fp.readline()
1886 1915 try:
1887 1916 name, size = l.split('\0', 1)
1888 1917 size = int(size)
1889 1918 except (ValueError, TypeError):
1890 1919 raise util.UnexpectedOutput(
1891 1920 _('Unexpected response from remote server:'), l)
1892 1921 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1893 1922 ofp = self.sopener(name, 'w')
1894 1923 for chunk in util.filechunkiter(fp, limit=size):
1895 1924 ofp.write(chunk)
1896 1925 ofp.close()
1897 1926 elapsed = time.time() - start
1898 1927 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1899 1928 (util.bytecount(total_bytes), elapsed,
1900 1929 util.bytecount(total_bytes / elapsed)))
1901 1930 self.reload()
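# like addchangegroup, never return 0 here, so callers can tell
# success apart from "no changes found"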
1902 1931 return len(self.heads()) + 1
1903 1932
1904 1933 def clone(self, remote, heads=[], stream=False):
1905 1934 '''clone remote repository.
1906 1935
1907 1936 keyword arguments:
1908 1937 heads: list of revs to clone (forces use of pull)
1909 1938 stream: use streaming clone if possible'''
1910 1939
1911 1940 # now, all clients that can request uncompressed clones can
1912 1941 # read repo formats supported by all servers that can serve
1913 1942 # them.
1914 1943
1915 1944 # if revlog format changes, client will have to check version
1916 1945 # and format flags on "stream" capability, and use
1917 1946 # uncompressed only if compatible.
1918 1947
1919 1948 if stream and not heads and remote.capable('stream'):
1920 1949 return self.stream_in(remote)
1921 1950 return self.pull(remote, heads)
1922 1951
1923 1952 # used to avoid circular references so destructors work
1924 1953 def aftertrans(files):
1925 1954 renamefiles = [tuple(t) for t in files]
1926 1955 def a():
1927 1956 for src, dest in renamefiles:
1928 1957 util.rename(src, dest)
1929 1958 return a
1930 1959
1931 1960 def instance(ui, path, create):
1932 1961 return localrepository(ui, util.drop_scheme('file', path), create)
1933 1962
1934 1963 def islocal(path):
1935 1964 return True
@@ -1,66 +1,86 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from demandload import *
11 11 from i18n import gettext as _
12 12 demandload(globals(), "changelog filelog httprangereader")
13 13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14 14
15 15 class rangereader(httprangereader.httprangereader):
16 16 def read(self, size=None):
17 17 try:
18 18 return httprangereader.httprangereader.read(self, size)
19 19 except urllib2.HTTPError, inst:
20 20 raise IOError(None, inst)
21 21 except urllib2.URLError, inst:
22 22 raise IOError(None, inst.reason[1])
23 23
24 24 def opener(base):
25 25 """return a function that opens files over http"""
26 26 p = base
27 27 def o(path, mode="r"):
28 28 f = "/".join((p, urllib.quote(path)))
29 29 return rangereader(f)
30 30 return o
31 31
32 32 class statichttprepository(localrepo.localrepository):
33 33 def __init__(self, ui, path):
34 34 self._url = path
35 self.path = (path + "/.hg")
36 self.spath = self.path
37 35 self.ui = ui
38 36 self.revlogversion = 0
37
38 self.path = (path + "/.hg")
39 39 self.opener = opener(self.path)
40 self.sopener = opener(self.spath)
40 # find requirements
41 try:
42 requirements = self.opener("requires").read().splitlines()
43 except IOError:
44 requirements = []
45 # check them
46 for r in requirements:
47 if r not in self.supported:
48 raise repo.RepoError(_("requirement '%s' not supported") % r)
49
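# for illustration: a repository created with the new layout ships a
# .hg/requires listing "revlogv1" and "store"; a requirement we do not
# recognize makes us abort here rather than misread the repository.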
50 # setup store
51 if "store" in requirements:
52 self.encodefn = util.encodefilename
53 self.decodefn = util.decodefilename
54 self.spath = self.path + "/store"
55 else:
56 self.encodefn = lambda x: x
57 self.decodefn = lambda x: x
58 self.spath = self.path
59 self.sopener = util.encodedopener(opener(self.spath), self.encodefn)
60
41 61 self.manifest = manifest.manifest(self.sopener)
42 62 self.changelog = changelog.changelog(self.sopener)
43 63 self.tagscache = None
44 64 self.nodetagscache = None
45 65 self.encodepats = None
46 66 self.decodepats = None
47 67
48 68 def url(self):
49 69 return 'static-' + self._url
50 70
51 71 def dev(self):
52 72 return -1
53 73
54 74 def local(self):
55 75 return False
56 76
57 77 def instance(ui, path, create):
58 78 if create:
59 79 raise util.Abort(_('cannot create new static-http repository'))
60 80 if path.startswith('old-http:'):
61 81 ui.warn(_("old-http:// syntax is deprecated, "
62 82 "please use static-http:// instead\n"))
63 83 path = path[4:]
64 84 else:
65 85 path = path[7:]
66 86 return statichttprepository(ui, path)
@@ -1,95 +1,97 b''
1 1 # streamclone.py - streaming clone server support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from i18n import gettext as _
10 10 demandload(globals(), "os stat util lock")
11 11
12 12 # if server supports streaming clone, it advertises "stream"
13 13 # capability with value that is version+flags of repo it is serving.
14 14 # client only streams if it can read that repo format.
15 15
16 16 def walkrepo(root):
17 17 '''iterate over metadata files in repository.
18 18 walk in natural (sorted) order.
19 19 yields 2-tuples: name of .d or .i file, size of file.'''
20 20
21 21 strip_count = len(root) + len(os.sep)
22 22 def walk(path, recurse):
23 23 ents = os.listdir(path)
24 24 ents.sort()
25 25 for e in ents:
26 26 pe = os.path.join(path, e)
27 27 st = os.lstat(pe)
28 28 if stat.S_ISDIR(st.st_mode):
29 29 if recurse:
30 30 for x in walk(pe, True):
31 31 yield x
32 32 else:
33 33 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
34 34 continue
35 35 sfx = e[-2:]
36 36 if sfx in ('.d', '.i'):
37 37 yield pe[strip_count:], st.st_size
38 38 # write file data first
39 39 for x in walk(os.path.join(root, 'data'), True):
40 40 yield x
41 41 # write manifest before changelog
42 42 meta = list(walk(root, False))
43 43 meta.sort()
44 44 meta.reverse()
45 45 for x in meta:
46 46 yield x
47 47
48 48 # stream file format is simple.
49 49 #
50 50 # server writes out line that says how many files, how many total
51 51 # bytes. separator is ascii space, byte counts are strings.
52 52 #
53 53 # then for each file:
54 54 #
55 55 # server writes out line that says file name, how many bytes in
56 56 # file. separator is ascii nul, byte count is string.
57 57 #
58 58 # server writes out raw file data.
59 59
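# an example stream for two files (illustrative names and sizes):
#
#   0\n
#   2 181\n
#   data/a.i\x00123\n<123 raw bytes>data/b.i\x0058\n<58 raw bytes>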
60 60 def stream_out(repo, fileobj):
61 61 '''stream out all metadata files in repository.
62 62 writes to file-like object, must support write() and optional flush().'''
63 63
64 64 if not repo.ui.configbool('server', 'uncompressed'):
65 65 fileobj.write('1\n')
66 66 return
67 67
68 68 # get a consistent snapshot of the repo. lock only during the scan so
69 69 # the lock is not held while we stream, and commits can happen meanwhile.
70 70 try:
71 71 repolock = repo.lock()
72 72 except (lock.LockHeld, lock.LockUnavailable), inst:
73 73 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
74 74 fileobj.write('2\n')
75 75 return
76 76
77 77 fileobj.write('0\n')
78 78 repo.ui.debug('scanning\n')
79 79 entries = []
80 80 total_bytes = 0
81 81 for name, size in walkrepo(repo.spath):
82 if repo.decodefn:
83 name = repo.decodefn(name)
82 84 entries.append((name, size))
83 85 total_bytes += size
84 86 repolock.release()
85 87
86 88 repo.ui.debug('%d files, %d bytes to transfer\n' %
87 89 (len(entries), total_bytes))
88 90 fileobj.write('%d %d\n' % (len(entries), total_bytes))
89 91 for name, size in entries:
90 92 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
91 93 fileobj.write('%s\0%d\n' % (name, size))
92 94 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
93 95 fileobj.write(chunk)
94 96 flush = getattr(fileobj, 'flush', None)
95 97 if flush: flush()
@@ -1,1290 +1,1322 b''
1 1 """
2 2 util.py - Mercurial utility functions and platform specific implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import gettext as _
16 16 from demandload import *
17 17 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
18 18 demandload(globals(), "os threading time calendar ConfigParser locale")
19 19
20 20 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding() \
21 21 or "ascii"
22 22 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
23 23 _fallbackencoding = 'ISO-8859-1'
24 24
25 25 def tolocal(s):
26 26 """
27 27 Convert a string from internal UTF-8 to local encoding
28 28
29 29 All internal strings should be UTF-8 but some repos before the
30 30 implementation of locale support may contain latin1 or possibly
31 31 other character sets. We attempt to decode everything strictly
32 32 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
33 33 replace unknown characters.
34 34 """
35 35 for e in ('UTF-8', _fallbackencoding):
36 36 try:
37 37 u = s.decode(e) # attempt strict decoding
38 38 return u.encode(_encoding, "replace")
39 39 except LookupError, k:
40 40 raise Abort(_("%s, please check your locale settings") % k)
41 41 except UnicodeDecodeError:
42 42 pass
43 43 u = s.decode("utf-8", "replace") # last ditch
44 44 return u.encode(_encoding, "replace")
45 45
46 46 def fromlocal(s):
47 47 """
48 48 Convert a string from the local character encoding to UTF-8
49 49
50 50 We attempt to decode strings using the encoding mode set by
51 51 HG_ENCODINGMODE, which defaults to 'strict'. In this mode, unknown
52 52 characters will cause an error message. Other modes include
53 53 'replace', which replaces unknown characters with a special
54 54 Unicode character, and 'ignore', which drops the character.
55 55 """
56 56 try:
57 57 return s.decode(_encoding, _encodingmode).encode("utf-8")
58 58 except UnicodeDecodeError, inst:
59 59 sub = s[max(0, inst.start-10):inst.start+10]
60 60 raise Abort("decoding near '%s': %s!" % (sub, inst))
61 61 except LookupError, k:
62 62 raise Abort(_("%s, please check your locale settings") % k)
63 63
64 64 def locallen(s):
65 65 """Find the length in characters of a local string"""
66 66 return len(s.decode(_encoding, "replace"))
67 67
68 68 def localsub(s, a, b=None):
69 69 try:
70 70 u = s.decode(_encoding, _encodingmode)
71 71 if b is not None:
72 72 u = u[a:b]
73 73 else:
74 74 u = u[:a]
75 75 return u.encode(_encoding, _encodingmode)
76 76 except UnicodeDecodeError, inst:
77 77 sub = s[max(0, inst.start-10):inst.start+10]
78 78 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
79 79
80 80 # used by parsedate
81 81 defaultdateformats = (
82 82 '%Y-%m-%d %H:%M:%S',
83 83 '%Y-%m-%d %I:%M:%S%p',
84 84 '%Y-%m-%d %H:%M',
85 85 '%Y-%m-%d %I:%M%p',
86 86 '%Y-%m-%d',
87 87 '%m-%d',
88 88 '%m/%d',
89 89 '%m/%d/%y',
90 90 '%m/%d/%Y',
91 91 '%a %b %d %H:%M:%S %Y',
92 92 '%a %b %d %I:%M:%S%p %Y',
93 93 '%b %d %H:%M:%S %Y',
94 94 '%b %d %I:%M:%S%p %Y',
95 95 '%b %d %H:%M:%S',
96 96 '%b %d %I:%M:%S%p',
97 97 '%b %d %H:%M',
98 98 '%b %d %I:%M%p',
99 99 '%b %d %Y',
100 100 '%b %d',
101 101 '%H:%M:%S',
102 102 '%I:%M:%S%p',
103 103 '%H:%M',
104 104 '%I:%M%p',
105 105 )
106 106
107 107 extendeddateformats = defaultdateformats + (
108 108 "%Y",
109 109 "%Y-%m",
110 110 "%b",
111 111 "%b %Y",
112 112 )
113 113
114 114 class SignalInterrupt(Exception):
115 115 """Exception raised on SIGTERM and SIGHUP."""
116 116
117 117 # like SafeConfigParser but with case-sensitive keys
118 118 class configparser(ConfigParser.SafeConfigParser):
119 119 def optionxform(self, optionstr):
120 120 return optionstr
121 121
122 122 def cachefunc(func):
123 123 '''cache the result of function calls'''
124 124 # XXX doesn't handle keyword args
125 125 cache = {}
126 126 if func.func_code.co_argcount == 1:
127 127 # we gain a small amount of time because
128 128 # we don't need to pack/unpack the list
129 129 def f(arg):
130 130 if arg not in cache:
131 131 cache[arg] = func(arg)
132 132 return cache[arg]
133 133 else:
134 134 def f(*args):
135 135 if args not in cache:
136 136 cache[args] = func(*args)
137 137 return cache[args]
138 138
139 139 return f
140 140
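# example use (illustrative; expensive_lookup is a made-up name):
#
#   lookup = cachefunc(lambda rev: expensive_lookup(rev))
#   lookup(5)   # computes and caches
#   lookup(5)   # served from the cache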
141 141 def pipefilter(s, cmd):
142 142 '''filter string S through command CMD, returning its output'''
143 143 (pout, pin) = popen2.popen2(cmd, -1, 'b')
144 144 def writer():
145 145 try:
146 146 pin.write(s)
147 147 pin.close()
148 148 except IOError, inst:
149 149 if inst.errno != errno.EPIPE:
150 150 raise
151 151
152 152 # we should use select instead on UNIX, but this will work on most
153 153 # systems, including Windows
154 154 w = threading.Thread(target=writer)
155 155 w.start()
156 156 f = pout.read()
157 157 pout.close()
158 158 w.join()
159 159 return f
160 160
161 161 def tempfilter(s, cmd):
162 162 '''filter string S through a pair of temporary files with CMD.
163 163 CMD is used as a template to create the real command to be run,
164 164 with the strings INFILE and OUTFILE replaced by the real names of
165 165 the temporary files generated.'''
166 166 inname, outname = None, None
167 167 try:
168 168 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
169 169 fp = os.fdopen(infd, 'wb')
170 170 fp.write(s)
171 171 fp.close()
172 172 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
173 173 os.close(outfd)
174 174 cmd = cmd.replace('INFILE', inname)
175 175 cmd = cmd.replace('OUTFILE', outname)
176 176 code = os.system(cmd)
177 177 if code: raise Abort(_("command '%s' failed: %s") %
178 178 (cmd, explain_exit(code)))
179 179 return open(outname, 'rb').read()
180 180 finally:
181 181 try:
182 182 if inname: os.unlink(inname)
183 183 except: pass
184 184 try:
185 185 if outname: os.unlink(outname)
186 186 except: pass
187 187
188 188 filtertable = {
189 189 'tempfile:': tempfilter,
190 190 'pipe:': pipefilter,
191 191 }
192 192
193 193 def filter(s, cmd):
194 194 "filter a string through a command that transforms its input to its output"
195 195 for name, fn in filtertable.iteritems():
196 196 if cmd.startswith(name):
197 197 return fn(s, cmd[len(name):].lstrip())
198 198 return pipefilter(s, cmd)
199 199
200 200 def find_in_path(name, path, default=None):
201 201 '''find name in search path. path can be string (will be split
202 202 with os.pathsep), or iterable thing that returns strings. if name
203 203 found, return path to name. else return default.'''
204 204 if isinstance(path, str):
205 205 path = path.split(os.pathsep)
206 206 for p in path:
207 207 p_name = os.path.join(p, name)
208 208 if os.path.exists(p_name):
209 209 return p_name
210 210 return default
211 211
212 212 def binary(s):
213 213 """return true if a string is binary data using diff's heuristic"""
214 214 if s and '\0' in s[:4096]:
215 215 return True
216 216 return False
217 217
218 218 def unique(g):
219 219 """return the uniq elements of iterable g"""
220 220 seen = {}
221 221 l = []
222 222 for f in g:
223 223 if f not in seen:
224 224 seen[f] = 1
225 225 l.append(f)
226 226 return l
227 227
228 228 class Abort(Exception):
229 229 """Raised if a command needs to print an error and exit."""
230 230
231 231 class UnexpectedOutput(Abort):
232 232 """Raised to print an error with part of output and exit."""
233 233
234 234 def always(fn): return True
235 235 def never(fn): return False
236 236
237 237 def patkind(name, dflt_pat='glob'):
238 238 """Split a string into an optional pattern kind prefix and the
239 239 actual pattern."""
240 240 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
241 241 if name.startswith(prefix + ':'): return name.split(':', 1)
242 242 return dflt_pat, name
243 243
244 244 def globre(pat, head='^', tail='$'):
245 245 "convert a glob pattern into a regexp"
246 246 i, n = 0, len(pat)
247 247 res = ''
248 248 group = False
249 249 def peek(): return i < n and pat[i]
250 250 while i < n:
251 251 c = pat[i]
252 252 i = i+1
253 253 if c == '*':
254 254 if peek() == '*':
255 255 i += 1
256 256 res += '.*'
257 257 else:
258 258 res += '[^/]*'
259 259 elif c == '?':
260 260 res += '.'
261 261 elif c == '[':
262 262 j = i
263 263 if j < n and pat[j] in '!]':
264 264 j += 1
265 265 while j < n and pat[j] != ']':
266 266 j += 1
267 267 if j >= n:
268 268 res += '\\['
269 269 else:
270 270 stuff = pat[i:j].replace('\\','\\\\')
271 271 i = j + 1
272 272 if stuff[0] == '!':
273 273 stuff = '^' + stuff[1:]
274 274 elif stuff[0] == '^':
275 275 stuff = '\\' + stuff
276 276 res = '%s[%s]' % (res, stuff)
277 277 elif c == '{':
278 278 group = True
279 279 res += '(?:'
280 280 elif c == '}' and group:
281 281 res += ')'
282 282 group = False
283 283 elif c == ',' and group:
284 284 res += '|'
285 285 elif c == '\\':
286 286 p = peek()
287 287 if p:
288 288 i += 1
289 289 res += re.escape(p)
290 290 else:
291 291 res += re.escape(c)
292 292 else:
293 293 res += re.escape(c)
294 294 return head + res + tail
295 295
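# globre examples (illustrative):
#
#   globre('*.py')  -> '^[^/]*\.py$'  ('*' does not cross '/')
#   globre('**.py') -> '^.*\.py$'     ('**' does)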
296 296 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
297 297
298 298 def pathto(n1, n2):
299 299 '''return the relative path from one place to another.
300 300 n1 should use os.sep to separate directories
301 301 n2 should use "/" to separate directories
302 302 returns an os.sep-separated path.
303 303 '''
304 304 if not n1: return localpath(n2)
305 305 a, b = n1.split(os.sep), n2.split('/')
306 306 a.reverse()
307 307 b.reverse()
308 308 while a and b and a[-1] == b[-1]:
309 309 a.pop()
310 310 b.pop()
311 311 b.reverse()
312 312 return os.sep.join((['..'] * len(a)) + b)
313 313
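# pathto example (illustrative, with os.sep == '/'):
#
#   pathto('a/b', 'a/c/d') -> '../c/d'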
314 314 def canonpath(root, cwd, myname):
315 315 """return the canonical path of myname, given cwd and root"""
316 316 if root == os.sep:
317 317 rootsep = os.sep
318 318 elif root.endswith(os.sep):
319 319 rootsep = root
320 320 else:
321 321 rootsep = root + os.sep
322 322 name = myname
323 323 if not os.path.isabs(name):
324 324 name = os.path.join(root, cwd, name)
325 325 name = os.path.normpath(name)
326 326 if name != rootsep and name.startswith(rootsep):
327 327 name = name[len(rootsep):]
328 328 audit_path(name)
329 329 return pconvert(name)
330 330 elif name == root:
331 331 return ''
332 332 else:
333 333 # Determine whether `name' is in the hierarchy at or beneath `root',
334 334 # by iterating name=dirname(name) until that causes no change (can't
335 335 # check name == '/', because that doesn't work on windows). For each
336 336 # `name', compare dev/inode numbers. If they match, the list `rel'
337 337 # holds the reversed list of components making up the relative file
338 338 # name we want.
339 339 root_st = os.stat(root)
340 340 rel = []
341 341 while True:
342 342 try:
343 343 name_st = os.stat(name)
344 344 except OSError:
345 345 break
346 346 if samestat(name_st, root_st):
347 347 rel.reverse()
348 348 name = os.path.join(*rel)
349 349 audit_path(name)
350 350 return pconvert(name)
351 351 dirname, basename = os.path.split(name)
352 352 rel.append(basename)
353 353 if dirname == name:
354 354 break
355 355 name = dirname
356 356
357 357 raise Abort('%s not under root' % myname)
358 358
359 359 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
360 360 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
361 361
362 362 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
363 363 if os.name == 'nt':
364 364 dflt_pat = 'glob'
365 365 else:
366 366 dflt_pat = 'relpath'
367 367 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
368 368
369 369 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
370 370 """build a function to match a set of file patterns
371 371
372 372 arguments:
373 373 canonroot - the canonical root of the tree you're matching against
374 374 cwd - the current working directory, if relevant
375 375 names - patterns to find
376 376 inc - patterns to include
377 377 exc - patterns to exclude
378 378 head - a regex to prepend to patterns to control whether a match is rooted
379 379
380 380 a pattern is one of:
381 381 'glob:<rooted glob>'
382 382 're:<rooted regexp>'
383 383 'path:<rooted path>'
384 384 'relglob:<relative glob>'
385 385 'relpath:<relative path>'
386 386 'relre:<relative regexp>'
387 387 '<rooted path or regexp>'
388 388
389 389 returns:
390 390 a 3-tuple containing
391 391 - list of explicit non-pattern names passed in
392 392 - a bool match(filename) function
393 393 - a bool indicating if any patterns were passed in
394 394
395 395 todo:
396 396 make head regex a rooted bool
397 397 """
398 398
399 399 def contains_glob(name):
400 400 for c in name:
401 401 if c in _globchars: return True
402 402 return False
403 403
404 404 def regex(kind, name, tail):
405 405 '''convert a pattern into a regular expression'''
406 406 if kind == 're':
407 407 return name
408 408 elif kind == 'path':
409 409 return '^' + re.escape(name) + '(?:/|$)'
410 410 elif kind == 'relglob':
411 411 return head + globre(name, '(?:|.*/)', tail)
412 412 elif kind == 'relpath':
413 413 return head + re.escape(name) + tail
414 414 elif kind == 'relre':
415 415 if name.startswith('^'):
416 416 return name
417 417 return '.*' + name
418 418 return head + globre(name, '', tail)
419 419
420 420 def matchfn(pats, tail):
421 421 """build a matching function from a set of patterns"""
422 422 if not pats:
423 423 return
424 424 matches = []
425 425 for k, p in pats:
426 426 try:
427 427 pat = '(?:%s)' % regex(k, p, tail)
428 428 matches.append(re.compile(pat).match)
429 429 except re.error:
430 430 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
431 431 else: raise Abort("invalid pattern (%s): %s" % (k, p))
432 432
433 433 def buildfn(text):
434 434 for m in matches:
435 435 r = m(text)
436 436 if r:
437 437 return r
438 438
439 439 return buildfn
440 440
441 441 def globprefix(pat):
442 442 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
443 443 root = []
444 444 for p in pat.split(os.sep):
445 445 if contains_glob(p): break
446 446 root.append(p)
447 447 return '/'.join(root)
448 448
449 449 pats = []
450 450 files = []
451 451 roots = []
452 452 for kind, name in [patkind(p, dflt_pat) for p in names]:
453 453 if kind in ('glob', 'relpath'):
454 454 name = canonpath(canonroot, cwd, name)
455 455 if name == '':
456 456 kind, name = 'glob', '**'
457 457 if kind in ('glob', 'path', 're'):
458 458 pats.append((kind, name))
459 459 if kind == 'glob':
460 460 root = globprefix(name)
461 461 if root: roots.append(root)
462 462 elif kind == 'relpath':
463 463 files.append((kind, name))
464 464 roots.append(name)
465 465
466 466 patmatch = matchfn(pats, '$') or always
467 467 filematch = matchfn(files, '(?:/|$)') or always
468 468 incmatch = always
469 469 if inc:
470 470 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
471 471 incmatch = matchfn(inckinds, '(?:/|$)')
472 472 excmatch = lambda fn: False
473 473 if exc:
474 474 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
475 475 excmatch = matchfn(exckinds, '(?:/|$)')
476 476
477 477 return (roots,
478 478 lambda fn: (incmatch(fn) and not excmatch(fn) and
479 479 (fn.endswith('/') or
480 480 (not pats and not files) or
481 481 (pats and patmatch(fn)) or
482 482 (files and filematch(fn)))),
483 483 (inc or exc or (pats and pats != [('glob', '**')])) and True)
484 484
485 485 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
486 486 '''enhanced shell command execution.
487 487 run with environment maybe modified, maybe in different dir.
488 488
489 489 if command fails and onerr is None, return status. if ui object,
490 490 print error message and return status, else raise onerr object as
491 491 exception.'''
492 492 def py2shell(val):
493 493 'convert python object into string that is useful to shell'
494 494 if val in (None, False):
495 495 return '0'
496 496 if val == True:
497 497 return '1'
498 498 return str(val)
499 499 oldenv = {}
500 500 for k in environ:
501 501 oldenv[k] = os.environ.get(k)
502 502 if cwd is not None:
503 503 oldcwd = os.getcwd()
504 504 try:
505 505 for k, v in environ.iteritems():
506 506 os.environ[k] = py2shell(v)
507 507 if cwd is not None and oldcwd != cwd:
508 508 os.chdir(cwd)
509 509 rc = os.system(cmd)
510 510 if rc and onerr:
511 511 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
512 512 explain_exit(rc)[0])
513 513 if errprefix:
514 514 errmsg = '%s: %s' % (errprefix, errmsg)
515 515 try:
516 516 onerr.warn(errmsg + '\n')
517 517 except AttributeError:
518 518 raise onerr(errmsg)
519 519 return rc
520 520 finally:
521 521 for k, v in oldenv.iteritems():
522 522 if v is None:
523 523 del os.environ[k]
524 524 else:
525 525 os.environ[k] = v
526 526 if cwd is not None and oldcwd != cwd:
527 527 os.chdir(oldcwd)
528 528
529 529 def rename(src, dst):
530 530 """forcibly rename a file"""
531 531 try:
532 532 os.rename(src, dst)
533 533 except OSError, err:
534 534 # on windows, rename to existing file is not allowed, so we
535 535 # must delete destination first. but if file is open, unlink
536 536 # schedules it for delete but does not delete it. rename
537 537 # happens immediately even for open files, so we create
538 538 # temporary file, delete it, rename destination to that name,
539 539 # then delete that. then rename is safe to do.
540 540 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
541 541 os.close(fd)
542 542 os.unlink(temp)
543 543 os.rename(dst, temp)
544 544 os.unlink(temp)
545 545 os.rename(src, dst)
546 546
547 547 def unlink(f):
548 548 """unlink and remove the directory if it is empty"""
549 549 os.unlink(f)
550 550 # try removing directories that might now be empty
551 551 try:
552 552 os.removedirs(os.path.dirname(f))
553 553 except OSError:
554 554 pass
555 555
556 556 def copyfile(src, dest):
557 557 "copy a file, preserving mode"
558 558 try:
559 559 shutil.copyfile(src, dest)
560 560 shutil.copymode(src, dest)
561 561 except shutil.Error, inst:
562 562 raise Abort(str(inst))
563 563
564 564 def copyfiles(src, dst, hardlink=None):
565 565 """Copy a directory tree using hardlinks if possible"""
566 566
567 567 if hardlink is None:
568 568 hardlink = (os.stat(src).st_dev ==
569 569 os.stat(os.path.dirname(dst)).st_dev)
570 570
571 571 if os.path.isdir(src):
572 572 os.mkdir(dst)
573 573 for name in os.listdir(src):
574 574 srcname = os.path.join(src, name)
575 575 dstname = os.path.join(dst, name)
576 576 copyfiles(srcname, dstname, hardlink)
577 577 else:
578 578 if hardlink:
579 579 try:
580 580 os_link(src, dst)
581 581 except (IOError, OSError):
582 582 hardlink = False
583 583 shutil.copy(src, dst)
584 584 else:
585 585 shutil.copy(src, dst)
586 586
587 587 def audit_path(path):
588 588 """Abort if path contains dangerous components"""
589 589 parts = os.path.normcase(path).split(os.sep)
590 590 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
591 591 or os.pardir in parts):
592 592 raise Abort(_("path contains illegal component: %s\n") % path)
593 593
594 594 def _makelock_file(info, pathname):
595 595 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
596 596 os.write(ld, info)
597 597 os.close(ld)
598 598
599 599 def _readlock_file(pathname):
600 600 return posixfile(pathname).read()
601 601
602 602 def nlinks(pathname):
603 603 """Return number of hardlinks for the given file."""
604 604 return os.lstat(pathname).st_nlink
605 605
606 606 if hasattr(os, 'link'):
607 607 os_link = os.link
608 608 else:
609 609 def os_link(src, dst):
610 610 raise OSError(0, _("Hardlinks not supported"))
611 611
612 612 def fstat(fp):
613 613 '''stat file object that may not have fileno method.'''
614 614 try:
615 615 return os.fstat(fp.fileno())
616 616 except AttributeError:
617 617 return os.stat(fp.name)
618 618
619 619 posixfile = file
620 620
621 621 def is_win_9x():
622 622 '''return true if run on windows 95, 98 or me.'''
623 623 try:
624 624 return sys.getwindowsversion()[3] == 1
625 625 except AttributeError:
626 626 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
627 627
628 628 getuser_fallback = None
629 629
630 630 def getuser():
631 631 '''return name of current user'''
632 632 try:
633 633 return getpass.getuser()
634 634 except ImportError:
635 635 # import of pwd will fail on windows - try fallback
636 636 if getuser_fallback:
637 637 return getuser_fallback()
638 638 # raised if win32api not available
639 639 raise Abort(_('user name not available - set USERNAME '
640 640 'environment variable'))
641 641
642 642 def username(uid=None):
643 643 """Return the name of the user with the given uid.
644 644
645 645 If uid is None, return the name of the current user."""
646 646 try:
647 647 import pwd
648 648 if uid is None:
649 649 uid = os.getuid()
650 650 try:
651 651 return pwd.getpwuid(uid)[0]
652 652 except KeyError:
653 653 return str(uid)
654 654 except ImportError:
655 655 return None
656 656
657 657 def groupname(gid=None):
658 658 """Return the name of the group with the given gid.
659 659
660 660 If gid is None, return the name of the current group."""
661 661 try:
662 662 import grp
663 663 if gid is None:
664 664 gid = os.getgid()
665 665 try:
666 666 return grp.getgrgid(gid)[0]
667 667 except KeyError:
668 668 return str(gid)
669 669 except ImportError:
670 670 return None
671 671
672 672 # File system features
673 673
674 674 def checkfolding(path):
675 675 """
676 676 Check whether the given path is on a case-sensitive filesystem
677 677
678 678 Requires a path (like /foo/.hg) ending with a foldable final
679 679 directory component.
680 680 """
681 681 s1 = os.stat(path)
682 682 d, b = os.path.split(path)
683 683 p2 = os.path.join(d, b.upper())
684 684 if path == p2:
685 685 p2 = os.path.join(d, b.lower())
686 686 try:
687 687 s2 = os.stat(p2)
688 688 if s2 == s1:
689 689 return False
690 690 return True
691 691 except:
692 692 return True
693 693
694 694 # Platform specific variants
695 695 if os.name == 'nt':
696 696 demandload(globals(), "msvcrt")
697 697 nulldev = 'NUL:'
698 698
699 699 class winstdout:
700 700 '''stdout on windows misbehaves if sent through a pipe'''
701 701
702 702 def __init__(self, fp):
703 703 self.fp = fp
704 704
705 705 def __getattr__(self, key):
706 706 return getattr(self.fp, key)
707 707
708 708 def close(self):
709 709 try:
710 710 self.fp.close()
711 711 except: pass
712 712
713 713 def write(self, s):
714 714 try:
715 715 return self.fp.write(s)
716 716 except IOError, inst:
717 717 if inst.errno != 0: raise
718 718 self.close()
719 719 raise IOError(errno.EPIPE, 'Broken pipe')
720 720
721 721 sys.stdout = winstdout(sys.stdout)
722 722
723 723 def system_rcpath():
724 724 try:
725 725 return system_rcpath_win32()
726 726 except:
727 727 return [r'c:\mercurial\mercurial.ini']
728 728
729 729 def os_rcpath():
730 730 '''return default os-specific hgrc search path'''
731 731 path = system_rcpath()
732 732 path.append(user_rcpath())
733 733 userprofile = os.environ.get('USERPROFILE')
734 734 if userprofile:
735 735 path.append(os.path.join(userprofile, 'mercurial.ini'))
736 736 return path
737 737
738 738 def user_rcpath():
739 739 '''return os-specific hgrc search path to the user dir'''
740 740 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
741 741
742 742 def parse_patch_output(output_line):
743 743 """parses the output produced by patch and returns the file name"""
744 744 pf = output_line[14:]
745 745 if pf[0] == '`':
746 746 pf = pf[1:-1] # Remove the quotes
747 747 return pf
748 748
749 749 def testpid(pid):
750 750 '''return False if pid dead, True if running or not known'''
751 751 return True
752 752
753 753 def is_exec(f, last):
754 754 return last
755 755
756 756 def set_exec(f, mode):
757 757 pass
758 758
759 759 def set_binary(fd):
760 760 msvcrt.setmode(fd.fileno(), os.O_BINARY)
761 761
762 762 def pconvert(path):
763 763 return path.replace("\\", "/")
764 764
765 765 def localpath(path):
766 766 return path.replace('/', '\\')
767 767
768 768 def normpath(path):
769 769 return pconvert(os.path.normpath(path))
770 770
771 771 makelock = _makelock_file
772 772 readlock = _readlock_file
773 773
774 774 def samestat(s1, s2):
775 775 return False
776 776
777 777 def shellquote(s):
778 778 return '"%s"' % s.replace('"', '\\"')
779 779
780 780 def explain_exit(code):
781 781 return _("exited with status %d") % code, code
782 782
783 783 # if you change this stub into a real check, please try to implement the
784 784 # username and groupname functions above, too.
785 785 def isowner(fp, st=None):
786 786 return True
787 787
788 788 try:
789 789 # override functions with win32 versions if possible
790 790 from util_win32 import *
791 791 if not is_win_9x():
792 792 posixfile = posixfile_nt
793 793 except ImportError:
794 794 pass
795 795
796 796 else:
797 797 nulldev = '/dev/null'
798 798
799 799 def rcfiles(path):
800 800 rcs = [os.path.join(path, 'hgrc')]
801 801 rcdir = os.path.join(path, 'hgrc.d')
802 802 try:
803 803 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
804 804 if f.endswith(".rc")])
805 805 except OSError:
806 806 pass
807 807 return rcs
808 808
809 809 def os_rcpath():
810 810 '''return default os-specific hgrc search path'''
811 811 path = []
812 812 # old mod_python does not set sys.argv
813 813 if len(getattr(sys, 'argv', [])) > 0:
814 814 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
815 815 '/../etc/mercurial'))
816 816 path.extend(rcfiles('/etc/mercurial'))
817 817 path.append(os.path.expanduser('~/.hgrc'))
818 818 path = [os.path.normpath(f) for f in path]
819 819 return path
820 820
821 821 def parse_patch_output(output_line):
822 822 """parses the output produced by patch and returns the file name"""
823 823 pf = output_line[14:]
824 824 if pf.startswith("'") and pf.endswith("'") and " " in pf:
825 825 pf = pf[1:-1] # Remove the quotes
826 826 return pf
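    #
    # e.g. (illustration): patch(1) prints a fixed 14-character prefix
    # before the file name, quoting names that contain spaces:
    #   parse_patch_output("patching file 'a file.txt'") -> 'a file.txt'
    #   parse_patch_output("patching file afile.txt")    -> 'afile.txt'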
827 827
828 828 def is_exec(f, last):
829 829 """check whether a file is executable"""
830 830 return (os.lstat(f).st_mode & 0100 != 0)
831 831
832 832 def set_exec(f, mode):
833 833 s = os.lstat(f).st_mode
834 834 if (s & 0100 != 0) == mode:
835 835 return
836 836 if mode:
837 837 # Turn on +x for every +r bit when making a file executable
838 838 # and obey umask.
839 839 umask = os.umask(0)
840 840 os.umask(umask)
841 841 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
842 842 else:
843 843 os.chmod(f, s & 0666)
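    #
    # Worked example (assuming a umask of 022): making a 0644 file
    # executable computes (0644 & 0444) >> 2 == 0111, masking with
    # ~022 leaves 0111, so the file ends up 0644 | 0111 == 0755.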
844 844
845 845 def set_binary(fd):
846 846 pass
847 847
848 848 def pconvert(path):
849 849 return path
850 850
851 851 def localpath(path):
852 852 return path
853 853
854 854 normpath = os.path.normpath
855 855 samestat = os.path.samestat
856 856
857 857 def makelock(info, pathname):
858 858 try:
859 859 os.symlink(info, pathname)
860 860 except OSError, why:
861 861 if why.errno == errno.EEXIST:
862 862 raise
863 863 else:
864 864 _makelock_file(info, pathname)
865 865
866 866 def readlock(pathname):
867 867 try:
868 868 return os.readlink(pathname)
869 869 except OSError, why:
870 870 if why.errno == errno.EINVAL:
871 871 return _readlock_file(pathname)
872 872 else:
873 873 raise
874 874
875 875 def shellquote(s):
876 876 return "'%s'" % s.replace("'", "'\\''")
877 877
878 878 def testpid(pid):
879 879 '''return False if pid dead, True if running or not sure'''
880 880 try:
881 881 os.kill(pid, 0)
882 882 return True
883 883 except OSError, inst:
884 884 return inst.errno != errno.ESRCH
885 885
886 886 def explain_exit(code):
887 887 """return a 2-tuple (desc, code) describing a process's status"""
888 888 if os.WIFEXITED(code):
889 889 val = os.WEXITSTATUS(code)
890 890 return _("exited with status %d") % val, val
891 891 elif os.WIFSIGNALED(code):
892 892 val = os.WTERMSIG(code)
893 893 return _("killed by signal %d") % val, val
894 894 elif os.WIFSTOPPED(code):
895 895 val = os.WSTOPSIG(code)
896 896 return _("stopped by signal %d") % val, val
897 897 raise ValueError(_("invalid exit code"))
898 898
899 899 def isowner(fp, st=None):
900 900     """Return True if the file object fp belongs to the current user.
901 901
902 902     The return value of a util.fstat(fp) may be passed as the st argument.
903 903     """
904 904     if st is None:
905 905         st = fstat(fp)
906 906 return st.st_uid == os.getuid()
907 907
908 def _buildencodefun():
909 e = '_'
910     win_reserved = [ord(x) for x in '\\:*?"<>|']
911 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
912 for x in (range(32) + range(126, 256) + win_reserved):
913 cmap[chr(x)] = "~%02x" % x
914 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
915 cmap[chr(x)] = e + chr(x).lower()
916 dmap = {}
917 for k, v in cmap.iteritems():
918 dmap[v] = k
919 def decode(s):
920 i = 0
921 while i < len(s):
922 for l in xrange(1, 4):
923 try:
924 yield dmap[s[i:i+l]]
925 i += l
926 break
927 except KeyError:
928 pass
929 else:
930 raise KeyError
931 return (lambda s: "".join([cmap[c] for c in s]),
932 lambda s: "".join(list(decode(s))))
933
934 encodefilename, decodefilename = _buildencodefun()
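#
# Worked example (illustrative): uppercase letters fold to '_' plus the
# lowercase letter and '_' itself escapes to '__', keeping the mapping
# reversible; reserved and non-printable bytes become '~XX':
#   encodefilename('data/FOO_bar?.i')        -> 'data/_f_o_o__bar~3f.i'
#   decodefilename('data/_f_o_o__bar~3f.i')  -> 'data/FOO_bar?.i'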
935
936 def encodedopener(openerfn, fn):
937 def o(path, *args, **kw):
938 return openerfn(fn(path), *args, **kw)
939 return o
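# e.g. (sketch): wrap a store opener so every path is encoded on disk:
#   sopener = encodedopener(opener('.hg/store'), encodefilename)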
908 940
909 941 def opener(base, audit=True):
910 942 """
911 943 return a function that opens files relative to base
912 944
913 945 this function is used to hide the details of COW semantics and
914 946 remote file access from higher level code.
915 947 """
916 948 p = base
917 949 audit_p = audit
918 950
919 951 def mktempcopy(name):
920 952 d, fn = os.path.split(name)
921 953 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
922 954 os.close(fd)
923 955 ofp = posixfile(temp, "wb")
924 956 try:
925 957 try:
926 958 ifp = posixfile(name, "rb")
927 959 except IOError, inst:
928 960 if not getattr(inst, 'filename', None):
929 961 inst.filename = name
930 962 raise
931 963 for chunk in filechunkiter(ifp):
932 964 ofp.write(chunk)
933 965 ifp.close()
934 966 ofp.close()
935 967 except:
936 968 try: os.unlink(temp)
937 969 except: pass
938 970 raise
939 971 st = os.lstat(name)
940 972 os.chmod(temp, st.st_mode)
941 973 return temp
942 974
943 975 class atomictempfile(posixfile):
944 976 """the file will only be copied when rename is called"""
945 977 def __init__(self, name, mode):
946 978 self.__name = name
947 979 self.temp = mktempcopy(name)
948 980 posixfile.__init__(self, self.temp, mode)
949 981 def rename(self):
950 982 if not self.closed:
951 983 posixfile.close(self)
952 984 rename(self.temp, localpath(self.__name))
953 985 def __del__(self):
954 986 if not self.closed:
955 987 try:
956 988 os.unlink(self.temp)
957 989 except: pass
958 990 posixfile.close(self)
959 991
960 992 class atomicfile(atomictempfile):
961 993 """the file will only be copied on close"""
962 994 def __init__(self, name, mode):
963 995 atomictempfile.__init__(self, name, mode)
964 996 def close(self):
965 997 self.rename()
966 998 def __del__(self):
967 999 self.rename()
968 1000
969 1001 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
970 1002 if audit_p:
971 1003 audit_path(path)
972 1004 f = os.path.join(p, path)
973 1005
974 1006 if not text:
975 1007 mode += "b" # for that other OS
976 1008
977 1009 if mode[0] != "r":
978 1010 try:
979 1011 nlink = nlinks(f)
980 1012 except OSError:
981 1013 d = os.path.dirname(f)
982 1014 if not os.path.isdir(d):
983 1015 os.makedirs(d)
984 1016 else:
985 1017 if atomic:
986 1018 return atomicfile(f, mode)
987 1019 elif atomictemp:
988 1020 return atomictempfile(f, mode)
989 1021 if nlink > 1:
990 1022 rename(mktempcopy(f), f)
991 1023 return posixfile(f, mode)
992 1024
993 1025 return o
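#
# Usage sketch (the path below is illustrative): an opener rooted at the
# store hides copy-on-write details; opening for write checks the link
# count and, when clones share the file via hardlinks (nlink > 1),
# first rewrites it through mktempcopy so the other copies stay intact:
#   sopener = opener('.hg/store')
#   f = sopener('data/foo.i', 'a')   # COW-safe append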
994 1026
995 1027 class chunkbuffer(object):
996 1028 """Allow arbitrary sized chunks of data to be efficiently read from an
997 1029 iterator over chunks of arbitrary size."""
998 1030
999 1031 def __init__(self, in_iter, targetsize = 2**16):
1000 1032 """in_iter is the iterator that's iterating over the input chunks.
1001 1033 targetsize is how big a buffer to try to maintain."""
1002 1034 self.in_iter = iter(in_iter)
1003 1035 self.buf = ''
1004 1036 self.targetsize = int(targetsize)
1005 1037 if self.targetsize <= 0:
1006 1038 raise ValueError(_("targetsize must be greater than 0, was %d") %
1007 1039 targetsize)
1008 1040 self.iterempty = False
1009 1041
1010 1042 def fillbuf(self):
1011 1043 """Ignore target size; read every chunk from iterator until empty."""
1012 1044 if not self.iterempty:
1013 1045 collector = cStringIO.StringIO()
1014 1046 collector.write(self.buf)
1015 1047 for ch in self.in_iter:
1016 1048 collector.write(ch)
1017 1049 self.buf = collector.getvalue()
1018 1050 self.iterempty = True
1019 1051
1020 1052 def read(self, l):
1021 1053 """Read L bytes of data from the iterator of chunks of data.
1022 1054 Returns less than L bytes if the iterator runs dry."""
1023 1055 if l > len(self.buf) and not self.iterempty:
1024 1056 # Clamp to a multiple of self.targetsize
1025 1057 targetsize = self.targetsize * ((l // self.targetsize) + 1)
1026 1058 collector = cStringIO.StringIO()
1027 1059 collector.write(self.buf)
1028 1060 collected = len(self.buf)
1029 1061 for chunk in self.in_iter:
1030 1062 collector.write(chunk)
1031 1063 collected += len(chunk)
1032 1064 if collected >= targetsize:
1033 1065 break
1034 1066 if collected < targetsize:
1035 1067 self.iterempty = True
1036 1068 self.buf = collector.getvalue()
1037 1069 s, self.buf = self.buf[:l], buffer(self.buf, l)
1038 1070 return s
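#
# e.g. (illustration): re-slice an iterator of ragged chunks:
#   cb = chunkbuffer(iter(['abc', 'defg']), targetsize=4)
#   cb.read(5)  -> 'abcde'
#   cb.read(5)  -> 'fg'    (iterator ran dry, short read)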
1039 1071
1040 1072 def filechunkiter(f, size=65536, limit=None):
1041 1073 """Create a generator that produces the data in the file size
1042 1074 (default 65536) bytes at a time, up to optional limit (default is
1043 1075 to read all data). Chunks may be less than size bytes if the
1044 1076 chunk is the last chunk in the file, or the file is a socket or
1045 1077 some other type of file that sometimes reads less data than is
1046 1078 requested."""
1047 1079 assert size >= 0
1048 1080 assert limit is None or limit >= 0
1049 1081 while True:
1050 1082 if limit is None: nbytes = size
1051 1083 else: nbytes = min(limit, size)
1052 1084 s = nbytes and f.read(nbytes)
1053 1085 if not s: break
1054 1086 if limit: limit -= len(s)
1055 1087 yield s
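#
# e.g. (sketch, src/dst are placeholders): copy a file in bounded memory:
#   for chunk in filechunkiter(posixfile(src, 'rb'), size=32768):
#       dst.write(chunk)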
1056 1088
1057 1089 def makedate():
1058 1090 lt = time.localtime()
1059 1091 if lt[8] == 1 and time.daylight:
1060 1092 tz = time.altzone
1061 1093 else:
1062 1094 tz = time.timezone
1063 1095 return time.mktime(lt), tz
1064 1096
1065 1097 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
1066 1098 """represent a (unixtime, offset) tuple as a localized time.
1067 1099 unixtime is seconds since the epoch, and offset is the time zone's
1068 1100 number of seconds away from UTC. if timezone is false, do not
1069 1101 append time zone to string."""
1070 1102 t, tz = date or makedate()
1071 1103 s = time.strftime(format, time.gmtime(float(t) - tz))
1072 1104 if timezone:
1073 1105 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
1074 1106 return s
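#
# e.g.: datestr((0, 0)) -> 'Thu Jan 01 00:00:00 1970 +0000'; this is why
# the test scripts in this changeset commit with -d "1000000 0" and show
# 'Mon Jan 12 13:46:40 1970 +0000' in their expected output.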
1075 1107
1076 1108 def strdate(string, format, defaults):
1077 1109 """parse a localized time string and return a (unixtime, offset) tuple.
1078 1110 if the string cannot be parsed, ValueError is raised."""
1079 1111 def timezone(string):
1080 1112 tz = string.split()[-1]
1081 1113 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1082 1114 tz = int(tz)
1083 1115 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1084 1116 return offset
1085 1117 if tz == "GMT" or tz == "UTC":
1086 1118 return 0
1087 1119 return None
1088 1120
1089 1121 # NOTE: unixtime = localunixtime + offset
1090 1122 offset, date = timezone(string), string
1091 1123 if offset != None:
1092 1124 date = " ".join(string.split()[:-1])
1093 1125
1094 1126 # add missing elements from defaults
1095 1127 for part in defaults:
1096 1128 found = [True for p in part if ("%"+p) in format]
1097 1129 if not found:
1098 1130 date += "@" + defaults[part]
1099 1131 format += "@%" + part[0]
1100 1132
1101 1133 timetuple = time.strptime(date, format)
1102 1134 localunixtime = int(calendar.timegm(timetuple))
1103 1135 if offset is None:
1104 1136 # local timezone
1105 1137 unixtime = int(time.mktime(timetuple))
1106 1138 offset = unixtime - localunixtime
1107 1139 else:
1108 1140 unixtime = localunixtime + offset
1109 1141 return unixtime, offset
1110 1142
1111 1143 def parsedate(string, formats=None, defaults=None):
1112 1144 """parse a localized time string and return a (unixtime, offset) tuple.
1113 1145 The date may be a "unixtime offset" string or in one of the specified
1114 1146 formats."""
1115 1147 if not string:
1116 1148 return 0, 0
1117 1149 if not formats:
1118 1150 formats = defaultdateformats
1119 1151 string = string.strip()
1120 1152 try:
1121 1153 when, offset = map(int, string.split(' '))
1122 1154 except ValueError:
1123 1155 # fill out defaults
1124 1156 if not defaults:
1125 1157 defaults = {}
1126 1158 now = makedate()
1127 1159 for part in "d mb yY HI M S".split():
1128 1160 if part not in defaults:
1129 1161 if part[0] in "HMS":
1130 1162 defaults[part] = "00"
1131 1163 elif part[0] in "dm":
1132 1164 defaults[part] = "1"
1133 1165 else:
1134 1166 defaults[part] = datestr(now, "%" + part[0], False)
1135 1167
1136 1168 for format in formats:
1137 1169 try:
1138 1170 when, offset = strdate(string, format, defaults)
1139 1171 except ValueError:
1140 1172 pass
1141 1173 else:
1142 1174 break
1143 1175 else:
1144 1176         raise Abort(_('invalid date: %r') % string)
1145 1177 # validate explicit (probably user-specified) date and
1146 1178 # time zone offset. values must fit in signed 32 bits for
1147 1179 # current 32-bit linux runtimes. timezones go from UTC-12
1148 1180 # to UTC+14
1149 1181 if abs(when) > 0x7fffffff:
1150 1182 raise Abort(_('date exceeds 32 bits: %d') % when)
1151 1183 if offset < -50400 or offset > 43200:
1152 1184 raise Abort(_('impossible time zone offset: %d') % offset)
1153 1185 return when, offset
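#
# e.g.: a raw "unixtime offset" pair short-circuits the format search:
#   parsedate('1000000 0') -> (1000000, 0)
# anything else is tried against each entry in formats via strdate(),
# with missing fields (time of day, day, month) filled from defaults.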
1154 1186
1155 1187 def matchdate(date):
1156 1188 """Return a function that matches a given date match specifier
1157 1189
1158 1190 Formats include:
1159 1191
1160 1192 '{date}' match a given date to the accuracy provided
1161 1193
1162 1194 '<{date}' on or before a given date
1163 1195
1164 1196 '>{date}' on or after a given date
1165 1197
1166 1198 """
1167 1199
1168 1200 def lower(date):
1169 1201 return parsedate(date, extendeddateformats)[0]
1170 1202
1171 1203 def upper(date):
1172 1204 d = dict(mb="12", HI="23", M="59", S="59")
1173 1205 for days in "31 30 29".split():
1174 1206 try:
1175 1207 d["d"] = days
1176 1208 return parsedate(date, extendeddateformats, d)[0]
1177 1209 except:
1178 1210 pass
1179 1211 d["d"] = "28"
1180 1212 return parsedate(date, extendeddateformats, d)[0]
1181 1213
1182 1214 if date[0] == "<":
1183 1215 when = upper(date[1:])
1184 1216 return lambda x: x <= when
1185 1217 elif date[0] == ">":
1186 1218 when = lower(date[1:])
1187 1219 return lambda x: x >= when
1188 1220 elif date[0] == "-":
1189 1221 try:
1190 1222 days = int(date[1:])
1191 1223 except ValueError:
1192 1224 raise Abort(_("invalid day spec: %s") % date[1:])
1193 1225 when = makedate()[0] - days * 3600 * 24
1194 1226 return lambda x: x >= when
1195 1227 elif " to " in date:
1196 1228 a, b = date.split(" to ")
1197 1229 start, stop = lower(a), upper(b)
1198 1230 return lambda x: x >= start and x <= stop
1199 1231 else:
1200 1232 start, stop = lower(date), upper(date)
1201 1233 return lambda x: x >= start and x <= stop
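#
# e.g. (illustrative specs): matchdate('>2006-10-12') matches on or
# after that day, matchdate('-3') matches the last three days, and
# matchdate('2006-10-01 to 2006-10-12') matches the closed range.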
1202 1234
1203 1235 def shortuser(user):
1204 1236 """Return a short representation of a user name or email address."""
1205 1237 f = user.find('@')
1206 1238 if f >= 0:
1207 1239 user = user[:f]
1208 1240 f = user.find('<')
1209 1241 if f >= 0:
1210 1242 user = user[f+1:]
1211 1243 f = user.find(' ')
1212 1244 if f >= 0:
1213 1245 user = user[:f]
1214 1246 f = user.find('.')
1215 1247 if f >= 0:
1216 1248 user = user[:f]
1217 1249 return user
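#
# e.g.: each find() strips one layer of decoration:
#   shortuser('Foo Bar <foo.bar@example.com>') -> 'foo'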
1218 1250
1219 1251 def ellipsis(text, maxlength=400):
1220 1252 """Trim string to at most maxlength (default: 400) characters."""
1221 1253 if len(text) <= maxlength:
1222 1254 return text
1223 1255 else:
1224 1256 return "%s..." % (text[:maxlength-3])
1225 1257
1226 1258 def walkrepos(path):
1227 1259 '''yield every hg repository under path, recursively.'''
1228 1260 def errhandler(err):
1229 1261 if err.filename == path:
1230 1262 raise err
1231 1263
1232 1264 for root, dirs, files in os.walk(path, onerror=errhandler):
1233 1265 for d in dirs:
1234 1266 if d == '.hg':
1235 1267 yield root
1236 1268 dirs[:] = []
1237 1269 break
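#
# e.g. (sketch, path is illustrative): enumerate repositories below a
# publishing root without descending into the repositories themselves:
#   for repo in walkrepos('/srv/hg'):
#       print repo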
1238 1270
1239 1271 _rcpath = None
1240 1272
1241 1273 def rcpath():
1242 1274 '''return hgrc search path. if env var HGRCPATH is set, use it.
1243 1275 for each item in path, if directory, use files ending in .rc,
1244 1276 else use item.
1245 1277 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1246 1278 if no HGRCPATH, use default os-specific path.'''
1247 1279 global _rcpath
1248 1280 if _rcpath is None:
1249 1281 if 'HGRCPATH' in os.environ:
1250 1282 _rcpath = []
1251 1283 for p in os.environ['HGRCPATH'].split(os.pathsep):
1252 1284 if not p: continue
1253 1285 if os.path.isdir(p):
1254 1286 for f in os.listdir(p):
1255 1287 if f.endswith('.rc'):
1256 1288 _rcpath.append(os.path.join(p, f))
1257 1289 else:
1258 1290 _rcpath.append(p)
1259 1291 else:
1260 1292 _rcpath = os_rcpath()
1261 1293 return _rcpath
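#
# e.g. (paths illustrative): HGRCPATH=/etc/hg.d:$HOME/.hgrc expands the
# directory to its *.rc files plus the plain file; HGRCPATH= (set but
# empty) yields [], so only the repository's own .hg/hgrc is consulted.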
1262 1294
1263 1295 def bytecount(nbytes):
1264 1296 '''return byte count formatted as readable string, with units'''
1265 1297
1266 1298 units = (
1267 1299 (100, 1<<30, _('%.0f GB')),
1268 1300 (10, 1<<30, _('%.1f GB')),
1269 1301 (1, 1<<30, _('%.2f GB')),
1270 1302 (100, 1<<20, _('%.0f MB')),
1271 1303 (10, 1<<20, _('%.1f MB')),
1272 1304 (1, 1<<20, _('%.2f MB')),
1273 1305 (100, 1<<10, _('%.0f KB')),
1274 1306 (10, 1<<10, _('%.1f KB')),
1275 1307 (1, 1<<10, _('%.2f KB')),
1276 1308 (1, 1, _('%.0f bytes')),
1277 1309 )
1278 1310
1279 1311 for multiplier, divisor, format in units:
1280 1312 if nbytes >= divisor * multiplier:
1281 1313 return format % (nbytes / float(divisor))
1282 1314 return units[-1][2] % nbytes
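#
# e.g.: the thresholds pick the coarsest unit keeping ~3 significant digits:
#   bytecount(1234567)   -> '1.18 MB'
#   bytecount(123456789) -> '118 MB'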
1283 1315
1284 1316 def drop_scheme(scheme, path):
1285 1317 sc = scheme + ':'
1286 1318 if path.startswith(sc):
1287 1319 path = path[len(sc):]
1288 1320 if path.startswith('//'):
1289 1321 path = path[2:]
1290 1322 return path
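# e.g.: drop_scheme('file', 'file:///tmp/repo') -> '/tmp/repo'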
@@ -1,113 +1,113 b''
1 1 #!/bin/sh
2 2
3 3 hg init test
4 4 cd test
5 5 cat >>afile <<EOF
6 6 0
7 7 EOF
8 8 hg add afile
9 9 hg commit -m "0.0" -d "1000000 0"
10 10 cat >>afile <<EOF
11 11 1
12 12 EOF
13 13 hg commit -m "0.1" -d "1000000 0"
14 14 cat >>afile <<EOF
15 15 2
16 16 EOF
17 17 hg commit -m "0.2" -d "1000000 0"
18 18 cat >>afile <<EOF
19 19 3
20 20 EOF
21 21 hg commit -m "0.3" -d "1000000 0"
22 22 hg update -C 0
23 23 cat >>afile <<EOF
24 24 1
25 25 EOF
26 26 hg commit -m "1.1" -d "1000000 0"
27 27 cat >>afile <<EOF
28 28 2
29 29 EOF
30 30 hg commit -m "1.2" -d "1000000 0"
31 31 cat >fred <<EOF
32 32 a line
33 33 EOF
34 34 cat >>afile <<EOF
35 35 3
36 36 EOF
37 37 hg add fred
38 38 hg commit -m "1.3" -d "1000000 0"
39 39 hg mv afile adifferentfile
40 40 hg commit -m "1.3m" -d "1000000 0"
41 41 hg update -C 3
42 42 hg mv afile anotherfile
43 43 hg commit -m "0.3m" -d "1000000 0"
44 hg debugindex .hg/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
48 hg debugindex .hg/00manifest.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/store/00manifest.i
49 49 hg verify
50 50 cd ..
51 51 for i in 0 1 2 3 4 5 6 7 8; do
52 52 mkdir test-"$i"
53 53 hg --cwd test-"$i" init
54 54 hg -R test bundle -r "$i" test-"$i".hg test-"$i"
55 55 cd test-"$i"
56 56 hg unbundle ../test-"$i".hg
57 57 hg verify
58 58 hg tip -q
59 59 cd ..
60 60 done
61 61 cd test-8
62 62 hg pull ../test-7
63 63 hg verify
64 64 hg rollback
65 65 cd ..
66 66
67 67 echo % should fail
68 68 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg test-3
69 69 hg -R test bundle -r tip test-bundle-branch1.hg
70 70
71 71 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg
72 72 hg -R test bundle --base 2 -r 7 test-bundle-branch2.hg
73 73 hg -R test bundle --base 2 test-bundle-all.hg
74 74 hg -R test bundle --base 3 -r tip test-bundle-should-fail.hg
75 75
76 76 # issue76 msg2163
77 77 hg -R test bundle --base 3 -r 3 -r 3 test-bundle-cset-3.hg
78 78
79 79 hg clone test-2 test-9
80 80 cd test-9
81 81 echo % 2
82 82 hg tip -q
83 83 hg unbundle ../test-bundle-should-fail.hg
84 84 echo % 2
85 85 hg tip -q
86 86 hg unbundle ../test-bundle-all.hg
87 87 echo % 8
88 88 hg tip -q
89 89 hg verify
90 90 hg rollback
91 91 echo % 2
92 92 hg tip -q
93 93 hg unbundle ../test-bundle-branch1.hg
94 94 echo % 4
95 95 hg tip -q
96 96 hg verify
97 97 hg rollback
98 98 hg unbundle ../test-bundle-branch2.hg
99 99 echo % 6
100 100 hg tip -q
101 101 hg verify
102 102
103 103 cd ../test
104 104 hg merge 7
105 105 hg ci -m merge -d "1000000 0"
106 106 cd ..
107 107 hg -R test bundle --base 2 test-bundle-head.hg
108 108 hg clone test-2 test-10
109 109 cd test-10
110 110 hg unbundle ../test-bundle-head.hg
111 111 echo % 9
112 112 hg tip -q
113 113 hg verify
@@ -1,59 +1,59 b''
1 1 #!/bin/sh
2 2
3 3 hg init test
4 4 cd test
5 5 cat >>afile <<EOF
6 6 0
7 7 EOF
8 8 hg add afile
9 9 hg commit -m "0.0"
10 10 cat >>afile <<EOF
11 11 1
12 12 EOF
13 13 hg commit -m "0.1"
14 14 cat >>afile <<EOF
15 15 2
16 16 EOF
17 17 hg commit -m "0.2"
18 18 cat >>afile <<EOF
19 19 3
20 20 EOF
21 21 hg commit -m "0.3"
22 22 hg update -C 0
23 23 cat >>afile <<EOF
24 24 1
25 25 EOF
26 26 hg commit -m "1.1"
27 27 cat >>afile <<EOF
28 28 2
29 29 EOF
30 30 hg commit -m "1.2"
31 31 cat >fred <<EOF
32 32 a line
33 33 EOF
34 34 cat >>afile <<EOF
35 35 3
36 36 EOF
37 37 hg add fred
38 38 hg commit -m "1.3"
39 39 hg mv afile adifferentfile
40 40 hg commit -m "1.3m"
41 41 hg update -C 3
42 42 hg mv afile anotherfile
43 43 hg commit -m "0.3m"
44 hg debugindex .hg/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
48 hg debugindex .hg/00manifest.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/store/00manifest.i
49 49 hg verify
50 50 cd ..
51 51 for i in 0 1 2 3 4 5 6 7 8; do
52 52 hg clone -r "$i" test test-"$i"
53 53 cd test-"$i"
54 54 hg verify
55 55 cd ..
56 56 done
57 57 cd test-8
58 58 hg pull ../test-7
59 59 hg verify
@@ -1,14 +1,14 b''
1 1 #!/bin/sh
2 2
3 3 hg init dir
4 4 cd dir
5 5 echo bleh > bar
6 6 hg add bar
7 7 hg ci -m 'add bar'
8 8
9 9 hg cp bar foo
10 10 echo >> bar
11 11 hg ci -m 'cp bar foo; change bar'
12 12
13 13 hg debugrename foo
14 hg debugindex .hg/data/bar.i
14 hg debugindex .hg/store/data/bar.i
@@ -1,30 +1,30 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4 echo a > a
5 5 hg add a
6 6 hg commit -m "1" -d "1000000 0"
7 7 hg status
8 8 hg copy a b
9 9 hg status
10 10 hg --debug commit -m "2" -d "1000000 0"
11 11 echo "we should see two history entries"
12 12 hg history -v
13 13 echo "we should see one log entry for a"
14 14 hg log a
15 15 echo "this should show a revision linked to changeset 0"
16 hg debugindex .hg/data/a.i
16 hg debugindex .hg/store/data/a.i
17 17 echo "we should see one log entry for b"
18 18 hg log b
19 19 echo "this should show a revision linked to changeset 1"
20 hg debugindex .hg/data/b.i
20 hg debugindex .hg/store/data/b.i
21 21
22 22 echo "this should show the rename information in the metadata"
23 hg debugdata .hg/data/b.d 0 | head -3 | tail -2
23 hg debugdata .hg/store/data/b.d 0 | head -3 | tail -2
24 24
25 $TESTDIR/md5sum.py .hg/data/b.i
25 $TESTDIR/md5sum.py .hg/store/data/b.i
26 26 hg cat b > bsum
27 27 $TESTDIR/md5sum.py bsum
28 28 hg cat a > asum
29 29 $TESTDIR/md5sum.py asum
30 30 hg verify
@@ -1,51 +1,51 b''
1 1 A b
2 2 b
3 3 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
4 4 we should see two history entries
5 5 changeset: 1:386a3cc01532
6 6 tag: tip
7 7 user: test
8 8 date: Mon Jan 12 13:46:40 1970 +0000
9 9 files: b
10 10 description:
11 11 2
12 12
13 13
14 14 changeset: 0:33aaa84a386b
15 15 user: test
16 16 date: Mon Jan 12 13:46:40 1970 +0000
17 17 files: a
18 18 description:
19 19 1
20 20
21 21
22 22 we should see one log entry for a
23 23 changeset: 0:33aaa84a386b
24 24 user: test
25 25 date: Mon Jan 12 13:46:40 1970 +0000
26 26 summary: 1
27 27
28 28 this should show a revision linked to changeset 0
29 29 rev offset length base linkrev nodeid p1 p2
30 30 0 0 3 0 0 b789fdd96dc2 000000000000 000000000000
31 31 we should see one log entry for b
32 32 changeset: 1:386a3cc01532
33 33 tag: tip
34 34 user: test
35 35 date: Mon Jan 12 13:46:40 1970 +0000
36 36 summary: 2
37 37
38 38 this should show a revision linked to changeset 1
39 39 rev offset length base linkrev nodeid p1 p2
40 40 0 0 65 0 1 9a263dd772e0 000000000000 000000000000
41 41 this should show the rename information in the metadata
42 42 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
43 43 copy: a
44 ed156f22f0a6fde642de0b5eba0cbbb2 .hg/data/b.i
44 ed156f22f0a6fde642de0b5eba0cbbb2 .hg/store/data/b.i
45 45 60b725f10c9c85c70d97880dfe8191b3 bsum
46 46 60b725f10c9c85c70d97880dfe8191b3 asum
47 47 checking changesets
48 48 checking manifests
49 49 crosschecking files in changesets and manifests
50 50 checking files
51 51 2 files, 2 changesets, 2 total revisions
@@ -1,41 +1,41 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4 echo foo > foo
5 5 hg add foo
6 6 hg commit -m1 -d"0 0"
7 7
8 8 echo "# should show copy"
9 9 hg copy foo bar
10 10 hg debugstate|grep '^copy'
11 11
12 12 echo "# shouldn't show copy"
13 13 hg commit -m2 -d"0 0"
14 14 hg debugstate|grep '^copy'
15 15
16 16 echo "# should match"
17 hg debugindex .hg/data/foo.i
17 hg debugindex .hg/store/data/foo.i
18 18 hg debugrename bar
19 19
20 20 echo bleah > foo
21 21 echo quux > bar
22 22 hg commit -m3 -d"0 0"
23 23
24 24 echo "# should not be renamed"
25 25 hg debugrename bar
26 26
27 27 hg copy -f foo bar
28 28 echo "# should show copy"
29 29 hg debugstate|grep '^copy'
30 30 hg commit -m3 -d"0 0"
31 31
32 32 echo "# should show no parents for tip"
33 hg debugindex .hg/data/bar.i
33 hg debugindex .hg/store/data/bar.i
34 34 echo "# should match"
35 hg debugindex .hg/data/foo.i
35 hg debugindex .hg/store/data/foo.i
36 36 hg debugrename bar
37 37
38 38 echo "# should show no copies"
39 39 hg debugstate|grep '^copy'
40 40
41 41 exit 0
@@ -1,49 +1,49 b''
1 1 #!/bin/sh
2 2 #
3 3 # A B
4 4 #
5 5 # 3 4 3
6 6 # |\/| |\
7 7 # |/\| | \
8 8 # 1 2 1 2
9 9 # \ / \ /
10 10 # 0 0
11 11 #
12 12 # if the result of the merge of 1 and 2
13 13 # is the same in 3 and 4, no new manifest
14 14 # will be created and the manifest group
15 15 # will be empty during the pull
16 16 #
17 17 # (plus we test a failure where outgoing
18 18 # wrongly reported the number of csets)
19 19 #
20 20
21 21 hg init a
22 22 cd a
23 23 touch init
24 24 hg ci -A -m 0 -d "1000000 0"
25 25 touch x y
26 26 hg ci -A -m 1 -d "1000000 0"
27 27 hg update 0
28 28 touch x y
29 29 hg ci -A -m 2 -d "1000000 0"
30 30 hg merge 1
31 31 hg ci -A -m m1 -d "1000000 0"
32 32 #hg log
33 #hg debugindex .hg/00manifest.i
33 #hg debugindex .hg/store/00manifest.i
34 34 hg update -C 1
35 35 hg merge 2
36 36 hg ci -A -m m2 -d "1000000 0"
37 37 #hg log
38 #hg debugindex .hg/00manifest.i
38 #hg debugindex .hg/store/00manifest.i
39 39
40 40 cd ..
41 41 hg clone -r 3 a b
42 42 hg clone -r 4 a c
43 43 hg -R a outgoing b
44 44 hg -R a outgoing c
45 45 hg -R b outgoing c
46 46 hg -R c outgoing b
47 47
48 48 hg -R b pull a
49 49 hg -R c pull a
@@ -1,34 +1,34 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4
5 5 cat > .hg/hgrc <<EOF
6 6 [encode]
7 7 *.gz = gunzip
8 8
9 9 [decode]
10 10 *.gz = gzip
11 11
12 12 EOF
13 13
14 14 echo "this is a test" | gzip > a.gz
15 15 hg add a.gz
16 16 hg ci -m "test" -d "1000000 0"
17 17 echo %% no changes
18 18 hg status
19 19 touch a.gz
20 20
21 21 echo %% no changes
22 22 hg status
23 23
24 24 echo %% uncompressed contents in repo
25 hg debugdata .hg/data/a.gz.d 0
25 hg debugdata .hg/store/data/a.gz.d 0
26 26
27 27 echo %% uncompress our working dir copy
28 28 gunzip < a.gz
29 29
30 30 rm a.gz
31 31 hg co
32 32
33 33 echo %% uncompress our new working dir copy
34 34 gunzip < a.gz
@@ -1,46 +1,46 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4
5 5 echo foo > a
6 6 echo foo > b
7 7 hg add a b
8 8
9 9 hg ci -m "test" -d "1000000 0"
10 10
11 11 echo blah > a
12 12
13 13 hg ci -m "branch a" -d "1000000 0"
14 14
15 15 hg co 0
16 16
17 17 echo blah > b
18 18
19 19 hg ci -m "branch b" -d "1000000 0"
20 20 HGMERGE=true hg merge 1
21 21
22 22 hg ci -m "merge b/a -> blah" -d "1000000 0"
23 23
24 24 hg co 1
25 25 HGMERGE=true hg merge 2
26 26 hg ci -m "merge a/b -> blah" -d "1000000 0"
27 27
28 28 hg log
29 hg debugindex .hg/00changelog.i
29 hg debugindex .hg/store/00changelog.i
30 30
31 31 echo
32 32
33 33 echo 1
34 34 hg manifest --debug 1
35 35 echo 2
36 36 hg manifest --debug 2
37 37 echo 3
38 38 hg manifest --debug 3
39 39 echo 4
40 40 hg manifest --debug 4
41 41
42 42 echo
43 43
44 hg debugindex .hg/data/a.i
44 hg debugindex .hg/store/data/a.i
45 45
46 46 hg verify
@@ -1,79 +1,79 b''
1 1 #!/bin/sh
2 2
3 3 # This test makes sure that we don't mark a file as merged with its ancestor
4 4 # when we do a merge.
5 5
6 6 cat <<'EOF' > merge
7 7 #!/bin/sh
8 8 echo merging for `basename $1`
9 9 EOF
10 10 chmod +x merge
11 11
12 12 echo creating base
13 13 hg init a
14 14 cd a
15 15 echo 1 > foo
16 16 echo 1 > bar
17 17 echo 1 > baz
18 18 echo 1 > quux
19 19 hg add foo bar baz quux
20 20 hg commit -m "base" -d "1000000 0"
21 21
22 22 cd ..
23 23 hg clone a b
24 24
25 25 echo creating branch a
26 26 cd a
27 27 echo 2a > foo
28 28 echo 2a > bar
29 29 hg commit -m "branch a" -d "1000000 0"
30 30
31 31 echo creating branch b
32 32
33 33 cd ..
34 34 cd b
35 35 echo 2b > foo
36 36 echo 2b > baz
37 37 hg commit -m "branch b" -d "1000000 0"
38 38
39 39 echo "we shouldn't have anything but n state here"
40 40 hg debugstate | cut -b 1-16,35-
41 41
42 42 echo merging
43 43 hg pull ../a
44 44 env HGMERGE=../merge hg merge -v
45 45
46 46 echo 2m > foo
47 47 echo 2b > baz
48 48 echo new > quux
49 49
50 50 echo "we shouldn't have anything but foo in merge state here"
51 51 hg debugstate | cut -b 1-16,35- | grep "^m"
52 52
53 53 hg ci -m "merge" -d "1000000 0"
54 54
55 55 echo "main: we should have a merge here"
56 hg debugindex .hg/00changelog.i
56 hg debugindex .hg/store/00changelog.i
57 57
58 58 echo "log should show foo and quux changed"
59 59 hg log -v -r tip
60 60
61 61 echo "foo: we should have a merge here"
62 hg debugindex .hg/data/foo.i
62 hg debugindex .hg/store/data/foo.i
63 63
64 64 echo "bar: we shouldn't have a merge here"
65 hg debugindex .hg/data/bar.i
65 hg debugindex .hg/store/data/bar.i
66 66
67 67 echo "baz: we shouldn't have a merge here"
68 hg debugindex .hg/data/baz.i
68 hg debugindex .hg/store/data/baz.i
69 69
70 70 echo "quux: we shouldn't have a merge here"
71 hg debugindex .hg/data/quux.i
71 hg debugindex .hg/store/data/quux.i
72 72
73 73 echo "manifest entries should match tips of all files"
74 74 hg manifest --debug
75 75
76 76 echo "everything should be clean now"
77 77 hg status
78 78
79 79 hg verify
@@ -1,48 +1,48 b''
1 1 #!/bin/sh -e
2 2
3 3 umask 027
4 4 mkdir test1
5 5 cd test1
6 6
7 7 hg init
8 8 touch a b
9 9 hg add a b
10 10 hg ci -m "added a b" -d "1000000 0"
11 11
12 12 cd ..
13 13 hg clone test1 test3
14 14 mkdir test2
15 15 cd test2
16 16
17 17 hg init
18 18 hg pull ../test1
19 19 hg co
20 20 chmod +x a
21 21 hg ci -m "chmod +x a" -d "1000000 0"
22 22
23 23 cd ../test1
24 24 echo 123 >>a
25 25 hg ci -m "a updated" -d "1000000 0"
26 26
27 27 hg pull ../test2
28 28 hg heads
29 29 hg history
30 30
31 31 hg -v merge
32 32
33 33 cd ../test3
34 34 echo 123 >>b
35 35 hg ci -m "b updated" -d "1000000 0"
36 36
37 37 hg pull ../test2
38 38 hg heads
39 39 hg history
40 40
41 41 hg -v merge
42 42
43 43 ls -l ../test[123]/a > foo
44 44 cut -b 1-10 < foo
45 45
46 hg debugindex .hg/data/a.i
47 hg debugindex ../test2/.hg/data/a.i
48 hg debugindex ../test1/.hg/data/a.i
46 hg debugindex .hg/store/data/a.i
47 hg debugindex ../test2/.hg/store/data/a.i
48 hg debugindex ../test1/.hg/store/data/a.i
@@ -1,78 +1,78 b''
1 1 #!/bin/sh
2 2
3 3 hg init remote
4 4 cd remote
5 5 echo "# creating 'remote'"
6 6 cat >>afile <<EOF
7 7 0
8 8 EOF
9 9 hg add afile
10 10 hg commit -m "0.0"
11 11 cat >>afile <<EOF
12 12 1
13 13 EOF
14 14 hg commit -m "0.1"
15 15 cat >>afile <<EOF
16 16 2
17 17 EOF
18 18 hg commit -m "0.2"
19 19 cat >>afile <<EOF
20 20 3
21 21 EOF
22 22 hg commit -m "0.3"
23 23 hg update -C 0
24 24 cat >>afile <<EOF
25 25 1
26 26 EOF
27 27 hg commit -m "1.1"
28 28 cat >>afile <<EOF
29 29 2
30 30 EOF
31 31 hg commit -m "1.2"
32 32 cat >fred <<EOF
33 33 a line
34 34 EOF
35 35 cat >>afile <<EOF
36 36 3
37 37 EOF
38 38 hg add fred
39 39 hg commit -m "1.3"
40 40 hg mv afile adifferentfile
41 41 hg commit -m "1.3m"
42 42 hg update -C 3
43 43 hg mv afile anotherfile
44 44 hg commit -m "0.3m"
45 hg debugindex .hg/data/afile.i
46 hg debugindex .hg/data/adifferentfile.i
47 hg debugindex .hg/data/anotherfile.i
48 hg debugindex .hg/data/fred.i
49 hg debugindex .hg/00manifest.i
45 hg debugindex .hg/store/data/afile.i
46 hg debugindex .hg/store/data/adifferentfile.i
47 hg debugindex .hg/store/data/anotherfile.i
48 hg debugindex .hg/store/data/fred.i
49 hg debugindex .hg/store/00manifest.i
50 50 hg verify
51 51 echo "# Starting server"
52 52 hg serve -p 20061 -d --pid-file=../hg1.pid
53 53 cd ..
54 54 cat hg1.pid >> $DAEMON_PIDS
55 55
56 56 echo "# clone remote via stream"
57 57 for i in 0 1 2 3 4 5 6 7 8; do
58 58 hg clone -r "$i" http://localhost:20061/ test-"$i" 2>&1
59 59 if cd test-"$i"; then
60 60 hg verify
61 61 cd ..
62 62 fi
63 63 done
64 64 cd test-8
65 65 hg pull ../test-7
66 66 hg verify
67 67 cd ..
68 68 cd test-1
69 69 hg pull -r 4 http://localhost:20061/ 2>&1
70 70 hg verify
71 71 hg pull http://localhost:20061/ 2>&1
72 72 cd ..
73 73 cd test-2
74 74 hg pull -r 5 http://localhost:20061/ 2>&1
75 75 hg verify
76 76 hg pull http://localhost:20061/ 2>&1
77 77 hg verify
78 78 cd ..
@@ -1,16 +1,16 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4 mkfifo p
5 5
6 6 hg serve --stdio < p &
7 7 P=$!
8 8 (echo lock; echo addchangegroup; sleep 5) > p &
9 9 Q=$!
10 10 sleep 3
11 11 kill -HUP $P
12 12 wait
13 ls .hg
13 ls -R .hg
14 14
15 15
16 16
@@ -1,8 +1,14 b''
1 1 0
2 2 0
3 3 adding changesets
4 4 killed!
5 5 transaction abort!
6 6 rollback completed
7 .hg:
7 8 00changelog.i
8 9 journal.dirstate
10 requires
11 store
12
13 .hg/store:
14 00changelog.i
@@ -1,11 +1,11 b''
1 1 #!/bin/sh
2 2
3 3 hg init a
4 4 echo a > a/a
5 5 hg --cwd a ci -A -m a
6 6 hg clone a b
7 7 echo b > b/b
8 8 hg --cwd b ci -A -m b
9 chmod 100 a/.hg
9 chmod 100 a/.hg/store
10 10 hg --cwd b push ../a
11 chmod 700 a/.hg
11 chmod 700 a/.hg/store
@@ -1,66 +1,66 b''
1 1 #!/bin/sh
2 2
3 3 # initial
4 4 hg init test-a
5 5 cd test-a
6 6 cat >test.txt <<"EOF"
7 7 1
8 8 2
9 9 3
10 10 EOF
11 11 hg add test.txt
12 12 hg commit -m "Initial" -d "1000000 0"
13 13
14 14 # clone
15 15 cd ..
16 16 hg clone test-a test-b
17 17
18 18 # change test-a
19 19 cd test-a
20 20 cat >test.txt <<"EOF"
21 21 one
22 22 two
23 23 three
24 24 EOF
25 25 hg commit -m "Numbers as words" -d "1000000 0"
26 26
27 27 # change test-b
28 28 cd ../test-b
29 29 cat >test.txt <<"EOF"
30 30 1
31 31 2.5
32 32 3
33 33 EOF
34 34 hg commit -m "2 -> 2.5" -d "1000000 0"
35 35
36 36 # now pull and merge from test-a
37 37 hg pull ../test-a
38 38 HGMERGE=merge hg merge
39 39 # resolve conflict
40 40 cat >test.txt <<"EOF"
41 41 one
42 42 two-point-five
43 43 three
44 44 EOF
45 45 rm -f *.orig
46 46 hg commit -m "Merge 1" -d "1000000 0"
47 47
48 48 # change test-a again
49 49 cd ../test-a
50 50 cat >test.txt <<"EOF"
51 51 one
52 52 two-point-one
53 53 three
54 54 EOF
55 55 hg commit -m "two -> two-point-one" -d "1000000 0"
56 56
57 57 # pull and merge from test-a again
58 58 cd ../test-b
59 59 hg pull ../test-a
60 60 HGMERGE=merge hg merge --debug
61 61
62 62 cat test.txt | sed "s% .*%%"
63 63
64 hg debugindex .hg/data/test.txt.i
64 hg debugindex .hg/store/data/test.txt.i
65 65
66 66 hg log
@@ -1,52 +1,52 b''
1 1 #!/bin/sh
2 2 #
3 3 # revlog.parseindex must be able to parse the index file even if
4 4 # an index entry is split between two 64k blocks. The ideal test
5 5 # would be to create an index file with inline data where
6 6 # 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
7 7 # the size of an index entry) and with an index entry starting right
8 8 # before the 64k block boundary, and try to read it.
9 9 #
10 10 # We approximate that by reducing the read buffer to 1 byte.
11 11 #
12 12
13 13 hg init a
14 14 cd a
15 15 echo abc > foo
16 16 hg add foo
17 17 hg commit -m 'add foo' -d '1000000 0'
18 18
19 19 echo >> foo
20 20 hg commit -m 'change foo' -d '1000001 0'
21 21 hg log -r 0:
22 22
23 23 cat >> test.py << EOF
24 24 from mercurial import changelog, util
25 25 from mercurial.node import *
26 26
27 27 class singlebyteread(object):
28 28 def __init__(self, real):
29 29 self.real = real
30 30
31 31 def read(self, size=-1):
32 32 if size == 65536:
33 33 size = 1
34 34 return self.real.read(size)
35 35
36 36 def __getattr__(self, key):
37 37 return getattr(self.real, key)
38 38
39 39 def opener(*args):
40 40 o = util.opener(*args)
41 41 def wrapper(*a):
42 42 f = o(*a)
43 43 return singlebyteread(f)
44 44 return wrapper
45 45
46 cl = changelog.changelog(opener('.hg'))
46 cl = changelog.changelog(opener('.hg/store'))
47 47 print cl.count(), 'revisions:'
48 48 for r in xrange(cl.count()):
49 49 print short(cl.node(r))
50 50 EOF
51 51
52 52 python test.py
@@ -1,15 +1,15 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4 echo foo > a
5 5 hg add a
6 6 hg commit -m "1" -d "1000000 0"
7 7 hg verify
8 chmod -r .hg/data/a.i
8 chmod -r .hg/store/data/a.i
9 9 hg verify 2>/dev/null || echo verify failed
10 chmod +r .hg/data/a.i
10 chmod +r .hg/store/data/a.i
11 11 hg verify 2>/dev/null || echo verify failed
12 chmod -w .hg/data/a.i
12 chmod -w .hg/store/data/a.i
13 13 echo barber > a
14 14 hg commit -m "2" -d "1000000 0" 2>/dev/null || echo commit failed
15 15
@@ -1,19 +1,19 b''
1 1 #!/bin/sh
2 2
3 3 mkdir a
4 4 cd a
5 5 hg init
6 6 echo foo > b
7 7 hg add b
8 8 hg ci -m "b" -d "1000000 0"
9 9
10 chmod -w .hg
10 chmod -w .hg/store
11 11
12 12 cd ..
13 13
14 14 hg clone a b
15 15
16 chmod +w a/.hg # let test clean up
16 chmod +w a/.hg/store # let test clean up
17 17
18 18 cd b
19 19 hg verify
@@ -1,61 +1,61 b''
1 1 #!/bin/sh
2 2
3 3 hg init test
4 4 cd test
5 5 cat >>afile <<EOF
6 6 0
7 7 EOF
8 8 hg add afile
9 9 hg commit -m "0.0"
10 10 cat >>afile <<EOF
11 11 1
12 12 EOF
13 13 hg commit -m "0.1"
14 14 cat >>afile <<EOF
15 15 2
16 16 EOF
17 17 hg commit -m "0.2"
18 18 cat >>afile <<EOF
19 19 3
20 20 EOF
21 21 hg commit -m "0.3"
22 22 hg update -C 0
23 23 cat >>afile <<EOF
24 24 1
25 25 EOF
26 26 hg commit -m "1.1"
27 27 cat >>afile <<EOF
28 28 2
29 29 EOF
30 30 hg commit -m "1.2"
31 31 cat >fred <<EOF
32 32 a line
33 33 EOF
34 34 cat >>afile <<EOF
35 35 3
36 36 EOF
37 37 hg add fred
38 38 hg commit -m "1.3"
39 39 hg mv afile adifferentfile
40 40 hg commit -m "1.3m"
41 41 hg update -C 3
42 42 hg mv afile anotherfile
43 43 hg commit -m "0.3m"
44 hg debugindex .hg/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
48 hg debugindex .hg/00manifest.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/store/00manifest.i
49 49 hg verify
50 50 cd ..
51 51 for i in 0 1 2 3 4 5 6 7 8; do
52 52 mkdir test-"$i"
53 53 hg --cwd test-"$i" init
54 54 hg -R test push -r "$i" test-"$i"
55 55 cd test-"$i"
56 56 hg verify
57 57 cd ..
58 58 done
59 59 cd test-8
60 60 hg pull ../test-7
61 61 hg verify
@@ -1,27 +1,27 b''
1 1 #!/bin/sh
2 2
3 3 mkdir t
4 4 cd t
5 5 hg init
6 6 echo "[merge]" >> .hg/hgrc
7 7 echo "followcopies = 1" >> .hg/hgrc
8 8 echo foo > a
9 9 echo foo > a2
10 10 hg add a a2
11 11 hg ci -m "start" -d "0 0"
12 12 hg mv a b
13 13 hg mv a2 b2
14 14 hg ci -m "rename" -d "0 0"
15 15 echo "checkout"
16 16 hg co 0
17 17 echo blahblah > a
18 18 echo blahblah > a2
19 19 hg mv a2 c2
20 20 hg ci -m "modify" -d "0 0"
21 21 echo "merge"
22 22 hg merge -y --debug
23 23 hg status -AC
24 24 cat b
25 25 hg ci -m "merge" -d "0 0"
26 hg debugindex .hg/data/b.i
26 hg debugindex .hg/store/data/b.i
27 27 hg debugrename b
\ No newline at end of file
@@ -1,99 +1,100 b''
1 1 #!/bin/sh
2 2
3 3 # This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 cat <<'EOF' > dummyssh
6 6 #!/bin/sh
7 7 # this attempts to deal with relative pathnames
8 8 cd `dirname $0`
9 9
10 10 # check for proper args
11 11 if [ $1 != "user@dummy" ] ; then
12 12 exit -1
13 13 fi
14 14
15 15 # check that we're in the right directory
16 16 if [ ! -x dummyssh ] ; then
17 17 exit -1
18 18 fi
19 19
20 20 SSH_CLIENT='127.0.0.1 1 2'
21 21 export SSH_CLIENT
22 22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
23 23 $2
24 24 EOF
25 25 chmod +x dummyssh
26 26
27 27 echo "# creating 'remote'"
28 28 hg init remote
29 29 cd remote
30 30 echo this > foo
31 hg ci -A -m "init" -d "1000000 0" foo
31 echo this > fooO
32 hg ci -A -m "init" -d "1000000 0" foo fooO
32 33 echo '[server]' > .hg/hgrc
33 34 echo 'uncompressed = True' >> .hg/hgrc
34 35 echo '[hooks]' >> .hg/hgrc
35 36 echo 'changegroup = echo changegroup in remote: u=$HG_URL >> ../dummylog' >> .hg/hgrc
36 37
37 38 cd ..
38 39
39 40 echo "# repo not found error"
40 41 hg clone -e ./dummyssh ssh://user@dummy/nonexistent local
41 42
42 43 echo "# clone remote via stream"
43 44 hg clone -e ./dummyssh --uncompressed ssh://user@dummy/remote local-stream 2>&1 | \
44 45 sed -e 's/[0-9][0-9.]*/XXX/g' -e 's/[KM]\(B\/sec\)/X\1/'
45 46 cd local-stream
46 47 hg verify
47 48 cd ..
48 49
49 50 echo "# clone remote via pull"
50 51 hg clone -e ./dummyssh ssh://user@dummy/remote local
51 52
52 53 echo "# verify"
53 54 cd local
54 55 hg verify
55 56
56 57 echo '[hooks]' >> .hg/hgrc
57 58 echo 'changegroup = echo changegroup in local: u=$HG_URL >> ../dummylog' >> .hg/hgrc
58 59
59 60 echo "# empty default pull"
60 61 hg paths
61 62 hg pull -e ../dummyssh
62 63
63 64 echo "# local change"
64 65 echo bleah > foo
65 66 hg ci -m "add" -d "1000000 0"
66 67
67 68 echo "# updating rc"
68 69 echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
69 70 echo "[ui]" >> .hg/hgrc
70 71 echo "ssh = ../dummyssh" >> .hg/hgrc
71 72
72 73 echo "# find outgoing"
73 74 hg out ssh://user@dummy/remote
74 75
75 76 echo "# find incoming on the remote side"
76 77 hg incoming -R ../remote -e ../dummyssh ssh://user@dummy/local
77 78
78 79 echo "# push"
79 80 hg push
80 81
81 82 cd ../remote
82 83
83 84 echo "# check remote tip"
84 85 hg tip
85 86 hg verify
86 87 hg cat -r tip foo
87 88
88 89 echo z > z
89 90 hg ci -A -m z -d '1000001 0' z
90 91
91 92 cd ../local
92 93 echo r > r
93 94 hg ci -A -m z -d '1000002 0' r
94 95
95 96 echo "# push should succeed"
96 97 hg push
97 98
98 99 cd ..
99 100 cat dummylog
@@ -1,99 +1,99 b''
1 1 #!/bin/sh
2 2
3 3 # This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 cat <<'EOF' > dummyssh
6 6 #!/bin/sh
7 7 # this attempts to deal with relative pathnames
8 8 cd `dirname $0`
9 9
10 10 # check for proper args
11 11 if [ $1 != "user@dummy" ] ; then
12 12 exit -1
13 13 fi
14 14
15 15 # check that we're in the right directory
16 16 if [ ! -x dummyssh ] ; then
17 17 exit -1
18 18 fi
19 19
20 20 SSH_CLIENT='127.0.0.1 1 2'
21 21 export SSH_CLIENT
22 22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
23 23 $2
24 24 EOF
25 25 chmod +x dummyssh
26 26
27 27 hg init remote
28 28 cd remote
29 29 echo "# creating 'remote'"
30 30 cat >>afile <<EOF
31 31 0
32 32 EOF
33 33 hg add afile
34 34 hg commit -m "0.0"
35 35 cat >>afile <<EOF
36 36 1
37 37 EOF
38 38 hg commit -m "0.1"
39 39 cat >>afile <<EOF
40 40 2
41 41 EOF
42 42 hg commit -m "0.2"
43 43 cat >>afile <<EOF
44 44 3
45 45 EOF
46 46 hg commit -m "0.3"
47 47 hg update -C 0
48 48 cat >>afile <<EOF
49 49 1
50 50 EOF
51 51 hg commit -m "1.1"
52 52 cat >>afile <<EOF
53 53 2
54 54 EOF
55 55 hg commit -m "1.2"
56 56 cat >fred <<EOF
57 57 a line
58 58 EOF
59 59 cat >>afile <<EOF
60 60 3
61 61 EOF
62 62 hg add fred
63 63 hg commit -m "1.3"
64 64 hg mv afile adifferentfile
65 65 hg commit -m "1.3m"
66 66 hg update -C 3
67 67 hg mv afile anotherfile
68 68 hg commit -m "0.3m"
69 hg debugindex .hg/data/afile.i
70 hg debugindex .hg/data/adifferentfile.i
71 hg debugindex .hg/data/anotherfile.i
72 hg debugindex .hg/data/fred.i
73 hg debugindex .hg/00manifest.i
69 hg debugindex .hg/store/data/afile.i
70 hg debugindex .hg/store/data/adifferentfile.i
71 hg debugindex .hg/store/data/anotherfile.i
72 hg debugindex .hg/store/data/fred.i
73 hg debugindex .hg/store/00manifest.i
74 74 hg verify
75 75 cd ..
76 76
77 77 echo "# clone remote via stream"
78 78 for i in 0 1 2 3 4 5 6 7 8; do
79 79 hg clone -e ./dummyssh --uncompressed -r "$i" ssh://user@dummy/remote test-"$i" 2>&1
80 80 if cd test-"$i"; then
81 81 hg verify
82 82 cd ..
83 83 fi
84 84 done
85 85 cd test-8
86 86 hg pull ../test-7
87 87 hg verify
88 88 cd ..
89 89 cd test-1
90 90 hg pull -e ../dummyssh -r 4 ssh://user@dummy/remote 2>&1
91 91 hg verify
92 92 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
93 93 cd ..
94 94 cd test-2
95 95 hg pull -e ../dummyssh -r 5 ssh://user@dummy/remote 2>&1
96 96 hg verify
97 97 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
98 98 hg verify
99 99 cd ..
@@ -1,88 +1,88 b''
1 1 # creating 'remote'
2 2 # repo not found error
3 3 remote: abort: repository nonexistent not found!
4 4 abort: no suitable response from remote hg!
5 5 # clone remote via stream
6 6 streaming all changes
7 7 XXX files to transfer, XXX bytes of data
8 8 transferred XXX bytes in XXX seconds (XXX XB/sec)
9 9 XXX files updated, XXX files merged, XXX files removed, XXX files unresolved
10 10 checking changesets
11 11 checking manifests
12 12 crosschecking files in changesets and manifests
13 13 checking files
14 1 files, 1 changesets, 1 total revisions
14 2 files, 1 changesets, 2 total revisions
15 15 # clone remote via pull
16 16 requesting all changes
17 17 adding changesets
18 18 adding manifests
19 19 adding file changes
20 added 1 changesets with 1 changes to 1 files
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
20 added 1 changesets with 2 changes to 2 files
21 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 22 # verify
23 23 checking changesets
24 24 checking manifests
25 25 crosschecking files in changesets and manifests
26 26 checking files
27 1 files, 1 changesets, 1 total revisions
27 2 files, 1 changesets, 2 total revisions
28 28 # empty default pull
29 29 default = ssh://user@dummy/remote
30 30 pulling from ssh://user@dummy/remote
31 31 searching for changes
32 32 no changes found
33 33 # local change
34 34 # updating rc
35 35 # find outgoing
36 36 searching for changes
37 changeset: 1:c54836a570be
37 changeset: 1:572896fe480d
38 38 tag: tip
39 39 user: test
40 40 date: Mon Jan 12 13:46:40 1970 +0000
41 41 summary: add
42 42
43 43 # find incoming on the remote side
44 44 searching for changes
45 changeset: 1:c54836a570be
45 changeset: 1:572896fe480d
46 46 tag: tip
47 47 user: test
48 48 date: Mon Jan 12 13:46:40 1970 +0000
49 49 summary: add
50 50
51 51 # push
52 52 pushing to ssh://user@dummy/remote
53 53 searching for changes
54 54 remote: adding changesets
55 55 remote: adding manifests
56 56 remote: adding file changes
57 57 remote: added 1 changesets with 1 changes to 1 files
58 58 # check remote tip
59 changeset: 1:c54836a570be
59 changeset: 1:572896fe480d
60 60 tag: tip
61 61 user: test
62 62 date: Mon Jan 12 13:46:40 1970 +0000
63 63 summary: add
64 64
65 65 checking changesets
66 66 checking manifests
67 67 crosschecking files in changesets and manifests
68 68 checking files
69 1 files, 2 changesets, 2 total revisions
69 2 files, 2 changesets, 3 total revisions
70 70 bleah
71 71 # push should succeed
72 72 pushing to ssh://user@dummy/remote
73 73 searching for changes
74 74 note: unsynced remote changes!
75 75 remote: adding changesets
76 76 remote: adding manifests
77 77 remote: adding file changes
78 78 remote: added 1 changesets with 1 changes to 1 files
79 79 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio 3: 4: 5:
80 80 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
81 81 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
82 82 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
83 83 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
84 84 Got arguments 1:user@dummy 2:hg -R local serve --stdio 3: 4: 5:
85 85 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
86 86 changegroup in remote: u=remote:ssh:127.0.0.1
87 87 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
88 88 changegroup in remote: u=remote:ssh:127.0.0.1