Subject: Re: [RFC PATCH 3/5] meta: add oe.utils
To: Adriaan Schmidt <adriaan.schmidt@siemens.com>, isar-users@googlegroups.com
References: <20211012130413.1719424-1-adriaan.schmidt@siemens.com>
 <20211012130413.1719424-4-adriaan.schmidt@siemens.com>
From: Jan Kiszka <jan.kiszka@siemens.com>
Date: Wed, 13 Oct 2021 12:17:57 +0200
In-Reply-To: <20211012130413.1719424-4-adriaan.schmidt@siemens.com>

On 12.10.21 15:04, Adriaan Schmidt wrote:
> Taken unmodified from yocto-3.3.2 (commit 31c639eb8664059eb4ed711be9173c223b4cc940)

Reason missing ("will use X for Y").

Jan

> 
> Signed-off-by: Adriaan Schmidt <adriaan.schmidt@siemens.com>
> ---
>  meta/classes/base.bbclass |   2 +-
>  meta/lib/oe/utils.py      | 569 ++++++++++++++++++++++++++++++++++++++
>  2 files changed, 570 insertions(+), 1 deletion(-)
>  create mode 100644 meta/lib/oe/utils.py
> 
> diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
> index 13134ff..f42dcba 100644
> --- a/meta/classes/base.bbclass
> +++ b/meta/classes/base.bbclass
> @@ -21,7 +21,7 @@
>  THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"
>  FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${PF}", "${FILE_DIRNAME}/${P}", "${FILE_DIRNAME}/${PN}", "${FILE_DIRNAME}/files", "${FILE_DIRNAME}"], d)}"
> 
> -OE_IMPORTS += "os sys time oe.path oe.patch"
> +OE_IMPORTS += "os sys time oe.path oe.patch oe.utils"
>  OE_IMPORTS[type] = "list"
> 
>  def oe_import(d):
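For readers following along: the OE_IMPORTS addition above is what makes the
module visible to every recipe, so its helpers can be called from inline
Python expressions and python tasks without an explicit import. A minimal
sketch of the pattern as it would look with this series applied (the variable
and task names are invented for illustration):

  # hypothetical recipe fragment; relies only on oe.utils being in OE_IMPORTS
  EXTRA_FLAGS = "${@oe.utils.conditional('MACHINE', 'qemuamd64', '-O2', '-Os', d)}"

  python do_show_version() {
      bb.plain(oe.utils.trim_version("1.2.3", 2))  # prints "1.2"
  }
  addtask show_version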
> diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
> new file mode 100644
> index 0000000..a84039f
> --- /dev/null
> +++ b/meta/lib/oe/utils.py
> @@ -0,0 +1,569 @@
> +#
> +# SPDX-License-Identifier: GPL-2.0-only
> +#
> +
> +import subprocess
> +import multiprocessing
> +import traceback
> +
> +def read_file(filename):
> +    try:
> +        f = open( filename, "r" )
> +    except IOError as reason:
> +        return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly.
> +    else:
> +        data = f.read().strip()
> +        f.close()
> +        return data
> +    return None
> +
> +def ifelse(condition, iftrue = True, iffalse = False):
> +    if condition:
> +        return iftrue
> +    else:
> +        return iffalse
> +
> +def conditional(variable, checkvalue, truevalue, falsevalue, d):
> +    if d.getVar(variable) == checkvalue:
> +        return truevalue
> +    else:
> +        return falsevalue
> +
> +def vartrue(var, iftrue, iffalse, d):
> +    import oe.types
> +    if oe.types.boolean(d.getVar(var)):
> +        return iftrue
> +    else:
> +        return iffalse
> +
> +def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
> +    if float(d.getVar(variable)) <= float(checkvalue):
> +        return truevalue
> +    else:
> +        return falsevalue
> +
> +def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
> +    result = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
> +    if result <= 0:
> +        return truevalue
> +    else:
> +        return falsevalue
> +
> +def both_contain(variable1, variable2, checkvalue, d):
> +    val1 = d.getVar(variable1)
> +    val2 = d.getVar(variable2)
> +    val1 = set(val1.split())
> +    val2 = set(val2.split())
> +    if isinstance(checkvalue, str):
> +        checkvalue = set(checkvalue.split())
> +    else:
> +        checkvalue = set(checkvalue)
> +    if checkvalue.issubset(val1) and checkvalue.issubset(val2):
> +        return " ".join(checkvalue)
> +    else:
> +        return ""
> +
> +def set_intersect(variable1, variable2, d):
> +    """
> +    Expand both variables, interpret them as lists of strings, and return the
> +    intersection as a flattened string.
> +
> +    For example:
> +    s1 = "a b c"
> +    s2 = "b c d"
> +    s3 = set_intersect(s1, s2)
> +    => s3 = "b c"
> +    """
> +    val1 = set(d.getVar(variable1).split())
> +    val2 = set(d.getVar(variable2).split())
> +    return " ".join(val1 & val2)
> +
> +def prune_suffix(var, suffixes, d):
> +    # See if var ends with any of the suffixes listed and
> +    # remove it if found
> +    for suffix in suffixes:
> +        if suffix and var.endswith(suffix):
> +            var = var[:-len(suffix)]
> +
> +    prefix = d.getVar("MLPREFIX")
> +    if prefix and var.startswith(prefix):
> +        var = var[len(prefix):]
> +
> +    return var
> +
> +def str_filter(f, str, d):
> +    from re import match
> +    return " ".join([x for x in str.split() if match(f, x, 0)])
> +
> +def str_filter_out(f, str, d):
> +    from re import match
> +    return " ".join([x for x in str.split() if not match(f, x, 0)])
> +
> +def build_depends_string(depends, task):
> +    """Append a taskname to a string of dependencies as used by the [depends] flag"""
> +    return " ".join(dep + ":" + task for dep in depends.split())
> +
> +def inherits(d, *classes):
> +    """Return True if the metadata inherits any of the specified classes"""
> +    return any(bb.data.inherits_class(cls, d) for cls in classes)
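Most of the helpers above are one-line predicates intended for inline
${@...} expressions. A short sketch of that pattern with invented variable
values, assuming a populated datastore d:

  # hypothetical recipe fragment; values invented for illustration
  FOO_VERSION = "1.4"
  AGE = "${@oe.utils.version_less_or_equal('FOO_VERSION', '1.5', 'old', 'new', d)}"  # -> "old"
  COMMON = "${@oe.utils.both_contain('DISTRO_FEATURES', 'MACHINE_FEATURES', 'acl', d)}"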
> +
> +def features_backfill(var,d):
> +    # This construct allows the addition of new features to variable specified
> +    # as var
> +    # Example for var = "DISTRO_FEATURES"
> +    # This construct allows the addition of new features to DISTRO_FEATURES
> +    # that if not present would disable existing functionality, without
> +    # disturbing distributions that have already set DISTRO_FEATURES.
> +    # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
> +    # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
> +    features = (d.getVar(var) or "").split()
> +    backfill = (d.getVar(var+"_BACKFILL") or "").split()
> +    considered = (d.getVar(var+"_BACKFILL_CONSIDERED") or "").split()
> +
> +    addfeatures = []
> +    for feature in backfill:
> +        if feature not in features and feature not in considered:
> +            addfeatures.append(feature)
> +
> +    if addfeatures:
> +        d.appendVar(var, " " + " ".join(addfeatures))
> +
> +def all_distro_features(d, features, truevalue="1", falsevalue=""):
> +    """
> +    Returns truevalue if *all* given features are set in DISTRO_FEATURES,
> +    else falsevalue. The features can be given as single string or anything
> +    that can be turned into a set.
> +
> +    This is a shorter, more flexible version of
> +    bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).
> +
> +    Without explicit true/false values it can be used directly where
> +    Python expects a boolean:
> +       if oe.utils.all_distro_features(d, "foo bar"):
> +           bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")
> +
> +    With just a truevalue, it can be used to include files that are meant to be
> +    used only when requested via DISTRO_FEATURES:
> +       require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc")
> +    """
> +    return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
> +
> +def any_distro_features(d, features, truevalue="1", falsevalue=""):
> +    """
> +    Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
> +    else falsevalue. The features can be given as single string or anything
> +    that can be turned into a set.
> +
> +    This is a shorter, more flexible version of
> +    bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).
> +
> +    Without explicit true/false values it can be used directly where
> +    Python expects a boolean:
> +       if not oe.utils.any_distro_features(d, "foo bar"):
> +           bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")
> +
> +    With just a truevalue, it can be used to include files that are meant to be
> +    used only when requested via DISTRO_FEATURES:
> +       require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc")
> +
> +    """
> +    return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
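The two DISTRO_FEATURES helpers return "1" or "" by default, so they can sit
directly in a boolean context, as the docstrings note. A complete sketch with
invented feature names:

  # hypothetical sanity check in a class
  python __anonymous() {
      if oe.utils.all_distro_features(d, "systemd sysvinit"):
          bb.warn("both init systems are selected")
      if not oe.utils.any_distro_features(d, "wayland x11"):
          bb.note("building headless")
  }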
> +
> +def parallel_make(d, makeinst=False):
> +    """
> +    Return the integer value for the number of parallel threads to use when
> +    building, scraped out of PARALLEL_MAKE. If no parallelization option is
> +    found, returns None
> +
> +    e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
> +    """
> +    if makeinst:
> +        pm = (d.getVar('PARALLEL_MAKEINST') or '').split()
> +    else:
> +        pm = (d.getVar('PARALLEL_MAKE') or '').split()
> +    # look for '-j' and throw other options (e.g. '-l') away
> +    while pm:
> +        opt = pm.pop(0)
> +        if opt == '-j':
> +            v = pm.pop(0)
> +        elif opt.startswith('-j'):
> +            v = opt[2:].strip()
> +        else:
> +            continue
> +
> +        return int(v)
> +
> +    return ''
> +
> +def parallel_make_argument(d, fmt, limit=None, makeinst=False):
> +    """
> +    Helper utility to construct a parallel make argument from the number of
> +    parallel threads specified in PARALLEL_MAKE.
> +
> +    Returns the input format string `fmt` where a single '%d' will be expanded
> +    with the number of parallel threads to use. If `limit` is specified, the
> +    number of parallel threads will be no larger than it. If no parallelization
> +    option is found in PARALLEL_MAKE, returns an empty string
> +
> +    e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
> +    "-n 10"
> +    """
> +    v = parallel_make(d, makeinst)
> +    if v:
> +        if limit:
> +            v = min(limit, v)
> +        return fmt % v
> +    return ''
> +
> +def packages_filter_out_system(d):
> +    """
> +    Return a list of packages from PACKAGES with the "system" packages such as
> +    PN-dbg PN-doc PN-locale-eb-gb removed.
> +    """
> +    pn = d.getVar('PN')
> +    blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
> +    localepkg = pn + "-locale-"
> +    pkgs = []
> +
> +    for pkg in d.getVar('PACKAGES').split():
> +        if pkg not in blacklist and localepkg not in pkg:
> +            pkgs.append(pkg)
> +    return pkgs
> +
> +def getstatusoutput(cmd):
> +    return subprocess.getstatusoutput(cmd)
> +
> +
> +def trim_version(version, num_parts=2):
> +    """
> +    Return just the first <num_parts> of <version>, split by periods. For
> +    example, trim_version("1.2.3", 2) will return "1.2".
> +    """
> +    if type(version) is not str:
> +        raise TypeError("Version should be a string")
> +    if num_parts < 1:
> +        raise ValueError("Cannot split to parts < 1")
> +
> +    parts = version.split(".")
> +    trimmed = ".".join(parts[:num_parts])
> +    return trimmed
> +
> +def cpu_count(at_least=1):
> +    cpus = len(os.sched_getaffinity(0))
> +    return max(cpus, at_least)
> +
> +def execute_pre_post_process(d, cmds):
> +    if cmds is None:
> +        return
> +
> +    for cmd in cmds.strip().split(';'):
> +        cmd = cmd.strip()
> +        if cmd != '':
> +            bb.note("Executing %s ..." % cmd)
> +            bb.build.exec_func(cmd, d)
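execute_pre_post_process() is the runner behind hook variables such as
IMAGE_PREPROCESS_COMMAND in OE: it takes a semicolon-separated string of
function names and exec_func()s each one. A sketch with an invented hook
variable and task (${IMAGE_ROOTFS} is assumed to be set by an image class):

  # hypothetical recipe fragment
  MY_HOOKS = "fixup_permissions"

  fixup_permissions() {
      chmod -R go-w ${IMAGE_ROOTFS}
  }

  python do_run_hooks() {
      oe.utils.execute_pre_post_process(d, d.getVar("MY_HOOKS"))
  }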
> +
> +# For each item in items, call the function 'target' with item as the first
> +# argument, extraargs as the other arguments and handle any exceptions in the
> +# parent thread
> +def multiprocess_launch(target, items, d, extraargs=None):
> +
> +    class ProcessLaunch(multiprocessing.Process):
> +        def __init__(self, *args, **kwargs):
> +            multiprocessing.Process.__init__(self, *args, **kwargs)
> +            self._pconn, self._cconn = multiprocessing.Pipe()
> +            self._exception = None
> +            self._result = None
> +
> +        def run(self):
> +            try:
> +                ret = self._target(*self._args, **self._kwargs)
> +                self._cconn.send((None, ret))
> +            except Exception as e:
> +                tb = traceback.format_exc()
> +                self._cconn.send((e, tb))
> +
> +        def update(self):
> +            if self._pconn.poll():
> +                (e, tb) = self._pconn.recv()
> +                if e is not None:
> +                    self._exception = (e, tb)
> +                else:
> +                    self._result = tb
> +
> +        @property
> +        def exception(self):
> +            self.update()
> +            return self._exception
> +
> +        @property
> +        def result(self):
> +            self.update()
> +            return self._result
> +
> +    max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
> +    launched = []
> +    errors = []
> +    results = []
> +    items = list(items)
> +    while (items and not errors) or launched:
> +        if not errors and items and len(launched) < max_process:
> +            args = (items.pop(),)
> +            if extraargs is not None:
> +                args = args + extraargs
> +            p = ProcessLaunch(target=target, args=args)
> +            p.start()
> +            launched.append(p)
> +        for q in launched:
> +            # Have to manually call update() to avoid deadlocks. The pipe can be full and
> +            # transfer stalled until we try and read the results object but the subprocess won't exit
> +            # as it still has data to write (https://bugs.python.org/issue8426)
> +            q.update()
> +            # The finished processes are joined when calling is_alive()
> +            if not q.is_alive():
> +                if q.exception:
> +                    errors.append(q.exception)
> +                if q.result:
> +                    results.append(q.result)
> +                launched.remove(q)
> +    # Paranoia doesn't hurt
> +    for p in launched:
> +        p.join()
> +    if errors:
> +        msg = ""
> +        for (e, tb) in errors:
> +            if isinstance(e, subprocess.CalledProcessError) and e.output:
> +                msg = msg + str(e) + "\n"
> +                msg = msg + "Subprocess output:"
> +                msg = msg + e.output.decode("utf-8", errors="ignore")
> +            else:
> +                msg = msg + str(e) + ": " + str(tb) + "\n"
> +        bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
> +    return results
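multiprocess_launch() forks one process per work item (bounded by
BB_NUMBER_THREADS), collects the workers' return values, and turns any child
exception into a bb.fatal in the parent. A sketch with an invented worker
function and variable:

  # hypothetical: gzip a list of files in parallel
  def compress_one(path, level):
      import subprocess
      subprocess.check_output(["gzip", "-%d" % level, path], stderr=subprocess.STDOUT)
      return path

  python do_compress_logs() {
      logs = (d.getVar("MY_LOG_FILES") or "").split()
      done = oe.utils.multiprocess_launch(compress_one, logs, d, extraargs=(9,))
      bb.note("compressed: %s" % " ".join(done))
  }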
> +
> +def squashspaces(string):
> +    import re
> +    return re.sub(r"\s+", " ", string).strip()
> +
> +def format_pkg_list(pkg_dict, ret_format=None):
> +    output = []
> +
> +    if ret_format == "arch":
> +        for pkg in sorted(pkg_dict):
> +            output.append("%s %s" % (pkg, pkg_dict[pkg]["arch"]))
> +    elif ret_format == "file":
> +        for pkg in sorted(pkg_dict):
> +            output.append("%s %s %s" % (pkg, pkg_dict[pkg]["filename"], pkg_dict[pkg]["arch"]))
> +    elif ret_format == "ver":
> +        for pkg in sorted(pkg_dict):
> +            output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
> +    elif ret_format == "deps":
> +        for pkg in sorted(pkg_dict):
> +            for dep in pkg_dict[pkg]["deps"]:
> +                output.append("%s|%s" % (pkg, dep))
> +    else:
> +        for pkg in sorted(pkg_dict):
> +            output.append(pkg)
> +
> +    output_str = '\n'.join(output)
> +
> +    if output_str:
> +        # make sure last line is newline terminated
> +        output_str += '\n'
> +
> +    return output_str
> +
> +
> +# Helper function to get the host compiler version
> +# Do not assume the compiler is gcc
> +def get_host_compiler_version(d, taskcontextonly=False):
> +    import re, subprocess
> +
> +    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
> +        return
> +
> +    compiler = d.getVar("BUILD_CC")
> +    # Get rid of ccache since it is not present when parsing.
> +    if compiler.startswith('ccache '):
> +        compiler = compiler[7:]
> +    try:
> +        env = os.environ.copy()
> +        # datastore PATH does not contain session PATH as set by environment-setup-...
> +        # this breaks the install-buildtools use-case
> +        # env["PATH"] = d.getVar("PATH")
> +        output = subprocess.check_output("%s --version" % compiler, \
> +                    shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
> +    except subprocess.CalledProcessError as e:
> +        bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
> +
> +    match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
> +    if not match:
> +        bb.fatal("Can't get compiler version from %s --version output" % compiler)
> +
> +    version = match.group(1)
> +    return compiler, version
> +
> +
> +def host_gcc_version(d, taskcontextonly=False):
> +    import re, subprocess
> +
> +    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
> +        return
> +
> +    compiler = d.getVar("BUILD_CC")
> +    # Get rid of ccache since it is not present when parsing.
> +    if compiler.startswith('ccache '):
> +        compiler = compiler[7:]
> +    try:
> +        env = os.environ.copy()
> +        env["PATH"] = d.getVar("PATH")
> +        output = subprocess.check_output("%s --version" % compiler, \
> +                    shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
> +    except subprocess.CalledProcessError as e:
> +        bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
> +
> +    match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
> +    if not match:
> +        bb.fatal("Can't get compiler version from %s --version output" % compiler)
> +
> +    version = match.group(1)
> +    return "-%s" % version if version in ("4.8", "4.9") else ""
> +
> +
> +def get_multilib_datastore(variant, d):
> +    localdata = bb.data.createCopy(d)
> +    if variant:
> +        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
> +        localdata.setVar("OVERRIDES", overrides)
> +        localdata.setVar("MLPREFIX", variant + "-")
> +    else:
> +        origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
> +        if origdefault:
> +            localdata.setVar("DEFAULTTUNE", origdefault)
> +        overrides = localdata.getVar("OVERRIDES", False).split(":")
> +        overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
> +        localdata.setVar("OVERRIDES", overrides)
> +        localdata.setVar("MLPREFIX", "")
> +    return localdata
> +
> +#
> +# Python 2.7 doesn't have threaded pools (just multiprocessing)
> +# so implement a version here
> +#
> +
> +from queue import Queue
> +from threading import Thread
> +
> +class ThreadedWorker(Thread):
> +    """Thread executing tasks from a given tasks queue"""
> +    def __init__(self, tasks, worker_init, worker_end):
> +        Thread.__init__(self)
> +        self.tasks = tasks
> +        self.daemon = True
> +
> +        self.worker_init = worker_init
> +        self.worker_end = worker_end
> +
> +    def run(self):
> +        from queue import Empty
> +
> +        if self.worker_init is not None:
> +            self.worker_init(self)
> +
> +        while True:
> +            try:
> +                func, args, kargs = self.tasks.get(block=False)
> +            except Empty:
> +                if self.worker_end is not None:
> +                    self.worker_end(self)
> +                break
> +
> +            try:
> +                func(self, *args, **kargs)
> +            except Exception as e:
> +                print(e)
> +            finally:
> +                self.tasks.task_done()
> +
> +class ThreadedPool:
> +    """Pool of threads consuming tasks from a queue"""
> +    def __init__(self, num_workers, num_tasks, worker_init=None,
> +            worker_end=None):
> +        self.tasks = Queue(num_tasks)
> +        self.workers = []
> +
> +        for _ in range(num_workers):
> +            worker = ThreadedWorker(self.tasks, worker_init, worker_end)
> +            self.workers.append(worker)
> +
> +    def start(self):
> +        for worker in self.workers:
> +            worker.start()
> +
> +    def add_task(self, func, *args, **kargs):
> +        """Add a task to the queue"""
> +        self.tasks.put((func, args, kargs))
> +
> +    def wait_completion(self):
> +        """Wait for completion of all the tasks in the queue"""
> +        self.tasks.join()
> +        for worker in self.workers:
> +            worker.join()
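Note that ThreadedWorker hands the worker thread itself to every task as the
first argument, and that workers exit as soon as the queue runs dry, so all
tasks should be queued before start() is called. A usage sketch (the task
function and URL list are invented):

  # hypothetical: fetch a fixed list of URLs with four threads
  urls = ["https://example.com/a", "https://example.com/b"]

  def fetch_one(worker, url):
      bb.note("%s fetching %s" % (worker.name, url))

  pool = oe.utils.ThreadedPool(4, len(urls))
  for url in urls:
      pool.add_task(fetch_one, url)
  pool.start()
  pool.wait_completion()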
> +
> +def write_ld_so_conf(d):
> +    # Some utils like prelink may not have the correct target library paths
> +    # so write an ld.so.conf to help them
> +    ldsoconf = d.expand("${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf")
> +    if os.path.exists(ldsoconf):
> +        bb.utils.remove(ldsoconf)
> +    bb.utils.mkdirhier(os.path.dirname(ldsoconf))
> +    with open(ldsoconf, "w") as f:
> +        f.write(d.getVar("base_libdir") + '\n')
> +        f.write(d.getVar("libdir") + '\n')
> +
> +class ImageQAFailed(Exception):
> +    def __init__(self, description, name=None, logfile=None):
> +        self.description = description
> +        self.name = name
> +        self.logfile=logfile
> +
> +    def __str__(self):
> +        msg = 'Function failed: %s' % self.name
> +        if self.description:
> +            msg = msg + ' (%s)' % self.description
> +
> +        return msg
> +
> +def sh_quote(string):
> +    import shlex
> +    return shlex.quote(string)
> +
> +def directory_size(root, blocksize=4096):
> +    """
> +    Calculate the size of the directory, taking into account hard links,
> +    rounding up every size to multiples of the blocksize.
> +    """
> +    def roundup(size):
> +        """
> +        Round the size up to the nearest multiple of the block size.
> +        """
> +        import math
> +        return math.ceil(size / blocksize) * blocksize
> +
> +    def getsize(filename):
> +        """
> +        Get the size of the filename, not following symlinks, taking into
> +        account hard links.
> +        """
> +        stat = os.lstat(filename)
> +        if stat.st_ino not in inodes:
> +            inodes.add(stat.st_ino)
> +            return stat.st_size
> +        else:
> +            return 0
> +
> +    inodes = set()
> +    total = 0
> +    for root, dirs, files in os.walk(root):
> +        total += sum(roundup(getsize(os.path.join(root, name))) for name in files)
> +        total += roundup(getsize(root))
> +    return total
> 

-- 
Siemens AG, T RDA IOT
Corporate Competence Center Embedded Linux