From c2fda55cfe14918ac4f67f43c885ec2749dab88d Mon Sep 17 00:00:00 2001
From: Mike Orr
Date: Tue, 20 May 2014 17:22:12 -0400
Subject: [PATCH 1/3] hdfs cleanup and compile test against CDH 4.4.

---
 build_tools/build_detect_platform |   2 +-
 hdfs/README                       |  13 +-
 hdfs/env_hdfs.h                   |   2 +-
 hdfs/hdfs.h                       | 477 ------------------------------
 hdfs/libhdfs.a                    | Bin 65218 -> 0 bytes
 hdfs/setup.sh                     |   7 +
 util/env_hdfs.cc                  |  48 ++-
 7 files changed, 52 insertions(+), 497 deletions(-)
 delete mode 100644 hdfs/hdfs.h
 delete mode 100644 hdfs/libhdfs.a
 create mode 100644 hdfs/setup.sh

diff --git a/build_tools/build_detect_platform b/build_tools/build_detect_platform
index 88aa216ad..c8ed00487 100755
--- a/build_tools/build_detect_platform
+++ b/build_tools/build_detect_platform
@@ -289,7 +289,7 @@ if test "$USE_HDFS"; then
     exit 1
   fi
   HDFS_CCFLAGS="$HDFS_CCFLAGS -I$JAVA_HOME/include -I$JAVA_HOME/include/linux -DUSE_HDFS"
-  HDFS_LDFLAGS="$HDFS_LDFLAGS -Wl,--no-whole-archive hdfs/libhdfs.a -L$JAVA_HOME/jre/lib/amd64"
+  HDFS_LDFLAGS="$HDFS_LDFLAGS -Wl,--no-whole-archive -lhdfs -L$JAVA_HOME/jre/lib/amd64"
   HDFS_LDFLAGS="$HDFS_LDFLAGS -L$JAVA_HOME/jre/lib/amd64/server -L$GLIBC_RUNTIME_PATH/lib"
   HDFS_LDFLAGS="$HDFS_LDFLAGS -ldl -lverify -ljava -ljvm"
   COMMON_FLAGS="$COMMON_FLAGS $HDFS_CCFLAGS"
diff --git a/hdfs/README b/hdfs/README
index 9b7d0a64d..f4f1106e4 100644
--- a/hdfs/README
+++ b/hdfs/README
@@ -1,19 +1,16 @@
 This directory contains the hdfs extensions needed to make rocksdb store
 files in HDFS.
 
-The hdfs.h file is copied from the Apache Hadoop 1.0 source code.
-It defines the libhdfs library
-(http://hadoop.apache.org/common/docs/r0.20.2/libhdfs.html) to access
-data in HDFS. The libhdfs.a is copied from the Apache Hadoop 1.0 build.
-It implements the API defined in hdfs.h. If your hadoop cluster is running
-a different hadoop release, then install these two files manually from your
-hadoop distribution and then recompile rocksdb.
+It has been compiled and tested against CDH 4.4 (2.0.0+1475-1.cdh4.4.0.p0.23~precise-cdh4.4.0).
+
+The configuration assumes that the packages libhdfs0 and libhdfs0-dev are
+installed, which means that hdfs.h is in /usr/include and libhdfs is in /usr/lib.
 
 The env_hdfs.h file defines the rocksdb objects that are needed to talk
 to an underlying filesystem.
 
 If you want to compile rocksdb with hdfs support, please set the following
-enviroment variables appropriately:
+environment variables appropriately (also defined in setup.sh for convenience):
   USE_HDFS=1
   JAVA_HOME=/usr/local/jdk-6u22-64
   LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/jdk-6u22-64/jre/lib/amd64/server:/usr/local/jdk-6u22-64/jre/lib/amd64/:./snappy/libs
diff --git a/hdfs/env_hdfs.h b/hdfs/env_hdfs.h
index e6fb8db12..5e7de77d3 100644
--- a/hdfs/env_hdfs.h
+++ b/hdfs/env_hdfs.h
@@ -14,7 +14,7 @@
 #include "rocksdb/status.h"
 
 #ifdef USE_HDFS
-#include "hdfs/hdfs.h"
+#include <hdfs.h>
 
 namespace rocksdb {
 
diff --git a/hdfs/hdfs.h b/hdfs/hdfs.h
deleted file mode 100644
index 8e8dfecb8..000000000
--- a/hdfs/hdfs.h
+++ /dev/null
@@ -1,477 +0,0 @@
-// Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// -#ifndef LIBHDFS_HDFS_H -#define LIBHDFS_HDFS_H - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include - -#ifndef O_RDONLY -#define O_RDONLY 1 -#endif - -#ifndef O_WRONLY -#define O_WRONLY 2 -#endif - -#ifndef EINTERNAL -#define EINTERNAL 255 -#endif - - -/** All APIs set errno to meaningful values */ -#ifdef __cplusplus -extern "C" { -#endif - - /** - * Some utility decls used in libhdfs. - */ - - typedef int32_t tSize; /// size of data for read/write io ops - typedef time_t tTime; /// time type in seconds - typedef int64_t tOffset;/// offset within the file - typedef uint16_t tPort; /// port - typedef enum tObjectKind { - kObjectKindFile = 'F', - kObjectKindDirectory = 'D', - } tObjectKind; - - - /** - * The C reflection of org.apache.org.hadoop.FileSystem . - */ - typedef void* hdfsFS; - - - /** - * The C equivalent of org.apache.org.hadoop.FSData(Input|Output)Stream . - */ - enum hdfsStreamType - { - UNINITIALIZED = 0, - INPUT = 1, - OUTPUT = 2, - }; - - - /** - * The 'file-handle' to a file in hdfs. - */ - struct hdfsFile_internal { - void* file; - enum hdfsStreamType type; - }; - typedef struct hdfsFile_internal* hdfsFile; - - - /** - * hdfsConnectAsUser - Connect to a hdfs file system as a specific user - * Connect to the hdfs. - * @param host A string containing either a host name, or an ip address - * of the namenode of a hdfs cluster. 'host' should be passed as NULL if - * you want to connect to local filesystem. 'host' should be passed as - * 'default' (and port as 0) to used the 'configured' filesystem - * (core-site/core-default.xml). - * @param port The port on which the server is listening. - * @param user the user name (this is hadoop domain user). Or NULL is equivelant to hhdfsConnect(host, port) - * @param groups the groups (these are hadoop domain groups) - * @return Returns a handle to the filesystem or NULL on error. - */ - hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user , const char *groups[], int groups_size ); - - - /** - * hdfsConnect - Connect to a hdfs file system. - * Connect to the hdfs. - * @param host A string containing either a host name, or an ip address - * of the namenode of a hdfs cluster. 'host' should be passed as NULL if - * you want to connect to local filesystem. 'host' should be passed as - * 'default' (and port as 0) to used the 'configured' filesystem - * (core-site/core-default.xml). - * @param port The port on which the server is listening. - * @return Returns a handle to the filesystem or NULL on error. - */ - hdfsFS hdfsConnect(const char* host, tPort port); - - - /** - * This are the same as hdfsConnectAsUser except that every invocation returns a new FileSystem handle. - * Applications should call a hdfsDisconnect for every call to hdfsConnectAsUserNewInstance. - */ - hdfsFS hdfsConnectAsUserNewInstance(const char* host, tPort port, const char *user , const char *groups[], int groups_size ); - hdfsFS hdfsConnectNewInstance(const char* host, tPort port); - hdfsFS hdfsConnectPath(const char* uri); - - /** - * hdfsDisconnect - Disconnect from the hdfs file system. - * Disconnect from hdfs. - * @param fs The configured filesystem handle. - * @return Returns 0 on success, -1 on error. - */ - int hdfsDisconnect(hdfsFS fs); - - - /** - * hdfsOpenFile - Open a hdfs file in given mode. - * @param fs The configured filesystem handle. - * @param path The full path to the file. 
- * @param flags - an | of bits/fcntl.h file flags - supported flags are O_RDONLY, O_WRONLY (meaning create or overwrite i.e., implies O_TRUNCAT), - * O_WRONLY|O_APPEND. Other flags are generally ignored other than (O_RDWR || (O_EXCL & O_CREAT)) which return NULL and set errno equal ENOTSUP. - * @param bufferSize Size of buffer for read/write - pass 0 if you want - * to use the default configured values. - * @param replication Block replication - pass 0 if you want to use - * the default configured values. - * @param blocksize Size of block - pass 0 if you want to use the - * default configured values. - * @return Returns the handle to the open file or NULL on error. - */ - hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags, - int bufferSize, short replication, tSize blocksize); - - - /** - * hdfsCloseFile - Close an open file. - * @param fs The configured filesystem handle. - * @param file The file handle. - * @return Returns 0 on success, -1 on error. - */ - int hdfsCloseFile(hdfsFS fs, hdfsFile file); - - - /** - * hdfsExists - Checks if a given path exsits on the filesystem - * @param fs The configured filesystem handle. - * @param path The path to look for - * @return Returns 0 on exists, 1 on non-exists, -1/-2 on error. - */ - int hdfsExists(hdfsFS fs, const char *path); - - - /** - * hdfsSeek - Seek to given offset in file. - * This works only for files opened in read-only mode. - * @param fs The configured filesystem handle. - * @param file The file handle. - * @param desiredPos Offset into the file to seek into. - * @return Returns 0 on success, -1 on error. - */ - int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos); - - - /** - * hdfsTell - Get the current offset in the file, in bytes. - * @param fs The configured filesystem handle. - * @param file The file handle. - * @return Current offset, -1 on error. - */ - tOffset hdfsTell(hdfsFS fs, hdfsFile file); - - - /** - * hdfsRead - Read data from an open file. - * @param fs The configured filesystem handle. - * @param file The file handle. - * @param buffer The buffer to copy read bytes into. - * @param length The length of the buffer. - * @return Returns the number of bytes actually read, possibly less - * than than length;-1 on error. - */ - tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length); - - - /** - * hdfsPread - Positional read of data from an open file. - * @param fs The configured filesystem handle. - * @param file The file handle. - * @param position Position from which to read - * @param buffer The buffer to copy read bytes into. - * @param length The length of the buffer. - * @return Returns the number of bytes actually read, possibly less than - * than length;-1 on error. - */ - tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position, - void* buffer, tSize length); - - - /** - * hdfsWrite - Write data into an open file. - * @param fs The configured filesystem handle. - * @param file The file handle. - * @param buffer The data. - * @param length The no. of bytes to write. - * @return Returns the number of bytes written, -1 on error. - */ - tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer, - tSize length); - - - /** - * hdfsWrite - Flush the data. - * @param fs The configured filesystem handle. - * @param file The file handle. - * @return Returns 0 on success, -1 on error. - */ - int hdfsFlush(hdfsFS fs, hdfsFile file); - - /** - * hdfsSync - Sync the data to persistent store. - * @param fs The configured filesystem handle. - * @param file The file handle. 
- * @return Returns 0 on success, -1 on error. - */ - int hdfsSync(hdfsFS fs, hdfsFile file); - - /** - * hdfsGetNumReplicasInPipeline - get number of remaining replicas in - * pipeline - * @param fs The configured filesystem handle - * @param file the file handle - * @return returns the # of datanodes in the write pipeline; -1 on error - */ - int hdfsGetNumCurrentReplicas(hdfsFS, hdfsFile file); - - /** - * hdfsAvailable - Number of bytes that can be read from this - * input stream without blocking. - * @param fs The configured filesystem handle. - * @param file The file handle. - * @return Returns available bytes; -1 on error. - */ - int hdfsAvailable(hdfsFS fs, hdfsFile file); - - - /** - * hdfsCopy - Copy file from one filesystem to another. - * @param srcFS The handle to source filesystem. - * @param src The path of source file. - * @param dstFS The handle to destination filesystem. - * @param dst The path of destination file. - * @return Returns 0 on success, -1 on error. - */ - int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst); - - - /** - * hdfsMove - Move file from one filesystem to another. - * @param srcFS The handle to source filesystem. - * @param src The path of source file. - * @param dstFS The handle to destination filesystem. - * @param dst The path of destination file. - * @return Returns 0 on success, -1 on error. - */ - int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst); - - - /** - * hdfsDelete - Delete file. - * @param fs The configured filesystem handle. - * @param path The path of the file. - * @return Returns 0 on success, -1 on error. - */ - int hdfsDelete(hdfsFS fs, const char* path); - - - /** - * hdfsRename - Rename file. - * @param fs The configured filesystem handle. - * @param oldPath The path of the source file. - * @param newPath The path of the destination file. - * @return Returns 0 on success, -1 on error. - */ - int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath); - - - /** - * hdfsGetWorkingDirectory - Get the current working directory for - * the given filesystem. - * @param fs The configured filesystem handle. - * @param buffer The user-buffer to copy path of cwd into. - * @param bufferSize The length of user-buffer. - * @return Returns buffer, NULL on error. - */ - char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize); - - - /** - * hdfsSetWorkingDirectory - Set the working directory. All relative - * paths will be resolved relative to it. - * @param fs The configured filesystem handle. - * @param path The path of the new 'cwd'. - * @return Returns 0 on success, -1 on error. - */ - int hdfsSetWorkingDirectory(hdfsFS fs, const char* path); - - - /** - * hdfsCreateDirectory - Make the given file and all non-existent - * parents into directories. - * @param fs The configured filesystem handle. - * @param path The path of the directory. - * @return Returns 0 on success, -1 on error. - */ - int hdfsCreateDirectory(hdfsFS fs, const char* path); - - - /** - * hdfsSetReplication - Set the replication of the specified - * file to the supplied value - * @param fs The configured filesystem handle. - * @param path The path of the file. - * @return Returns 0 on success, -1 on error. - */ - int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication); - - - /** - * hdfsFileInfo - Information about a file/directory. 
- */ - typedef struct { - tObjectKind mKind; /* file or directory */ - char *mName; /* the name of the file */ - tTime mLastMod; /* the last modification time for the file in seconds */ - tOffset mSize; /* the size of the file in bytes */ - short mReplication; /* the count of replicas */ - tOffset mBlockSize; /* the block size for the file */ - char *mOwner; /* the owner of the file */ - char *mGroup; /* the group associated with the file */ - short mPermissions; /* the permissions associated with the file */ - tTime mLastAccess; /* the last access time for the file in seconds */ - } hdfsFileInfo; - - - /** - * hdfsListDirectory - Get list of files/directories for a given - * directory-path. hdfsFreeFileInfo should be called to deallocate memory if - * the function returns non-NULL value. - * @param fs The configured filesystem handle. - * @param path The path of the directory. - * @param numEntries Set to the number of files/directories in path. - * @return Returns a dynamically-allocated array of hdfsFileInfo - * objects; NULL if empty or on error. - * on error, numEntries will be -1. - */ - hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path, - int *numEntries); - - - /** - * hdfsGetPathInfo - Get information about a path as a (dynamically - * allocated) single hdfsFileInfo struct. hdfsFreeFileInfo should be - * called when the pointer is no longer needed. - * @param fs The configured filesystem handle. - * @param path The path of the file. - * @return Returns a dynamically-allocated hdfsFileInfo object; - * NULL on error. - */ - hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path); - - - /** - * hdfsFreeFileInfo - Free up the hdfsFileInfo array (including fields) - * @param hdfsFileInfo The array of dynamically-allocated hdfsFileInfo - * objects. - * @param numEntries The size of the array. - */ - void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries); - - - /** - * hdfsGetHosts - Get hostnames where a particular block (determined by - * pos & blocksize) of a file is stored. The last element in the array - * is NULL. Due to replication, a single block could be present on - * multiple hosts. - * @param fs The configured filesystem handle. - * @param path The path of the file. - * @param start The start of the block. - * @param length The length of the block. - * @return Returns a dynamically-allocated 2-d array of blocks-hosts; - * NULL on error. - */ - char*** hdfsGetHosts(hdfsFS fs, const char* path, - tOffset start, tOffset length); - - - /** - * hdfsFreeHosts - Free up the structure returned by hdfsGetHosts - * @param hdfsFileInfo The array of dynamically-allocated hdfsFileInfo - * objects. - * @param numEntries The size of the array. - */ - void hdfsFreeHosts(char ***blockHosts); - - - /** - * hdfsGetDefaultBlockSize - Get the optimum blocksize. - * @param fs The configured filesystem handle. - * @return Returns the blocksize; -1 on error. - */ - tOffset hdfsGetDefaultBlockSize(hdfsFS fs); - - - /** - * hdfsGetCapacity - Return the raw capacity of the filesystem. - * @param fs The configured filesystem handle. - * @return Returns the raw-capacity; -1 on error. - */ - tOffset hdfsGetCapacity(hdfsFS fs); - - - /** - * hdfsGetUsed - Return the total raw size of all files in the filesystem. - * @param fs The configured filesystem handle. - * @return Returns the total-size; -1 on error. - */ - tOffset hdfsGetUsed(hdfsFS fs); - - /** - * hdfsChown - * @param fs The configured filesystem handle. 
- * @param path the path to the file or directory - * @param owner this is a string in Hadoop land. Set to null or "" if only setting group - * @param group this is a string in Hadoop land. Set to null or "" if only setting user - * @return 0 on success else -1 - */ - int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group); - - /** - * hdfsChmod - * @param fs The configured filesystem handle. - * @param path the path to the file or directory - * @param mode the bitmask to set it to - * @return 0 on success else -1 - */ - int hdfsChmod(hdfsFS fs, const char* path, short mode); - - /** - * hdfsUtime - * @param fs The configured filesystem handle. - * @param path the path to the file or directory - * @param mtime new modification time or 0 for only set access time in seconds - * @param atime new access time or 0 for only set modification time in seconds - * @return 0 on success else -1 - */ - int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime); - -#ifdef __cplusplus -} -#endif - -#endif /*LIBHDFS_HDFS_H*/ - -/** - * vim: ts=4: sw=4: et - */ diff --git a/hdfs/libhdfs.a b/hdfs/libhdfs.a deleted file mode 100644 index 4d1f19f0b9cceb4a8923d7c876c64841eceb2495..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 65218 zcmd_T3w&HvwLgCP(3V0;0l3 zB?L1Lk?N)1UO^3tzZ(>>DvyXQgvV{-lYkY&wQ5x4PE3E7qA1Z@?f<*hUTbE}&P-a0 zcz^%j?|j;vbH01+wbxpE?X@50%-Qd{u)LvS!-7e(LfOZhbIzGJZ|=Mc&Yv9$;U5nB zHx#y5s>eOtd<*^Ovd#A=F@v16ufhEz#SVP^7 z2Ek>zbX=#ajxMgQWMeUFu1W=}Yp<`nF1j)r+W@7nt*N`Fyk>Dtd1K@1C<=;_V{PQL zWtB@C>Navx05?L}@roE+Y*9l)`HgF04XBSSeAP7{M&+!kk{rbHGF0KJDwhyk8;vc$ zY+1PWdKRg;F4OL%#m}2JFBF<{{=B*PJDb`#{+@sCxuMW`XP!U>7^ z{zyyH{vbA$Xd=lVl5Fk*he#3$$)@3hed!Fdks%dH#0Nl`a+LGl!jwRoQV07Yi7@ie z|EAQkMEAEzkVomyWAul(1}XXjx?vPL9K>fuk{2VH*GwBgx;qV-P{6s`M4K~d>F1x1lh6%;Mp z5=m^E2)druCHO1+B6``vVg&`at|TFoPP!z6VwYB;jkN6IEb#)zhWHGAkF&V+MCg_(Y<&;IZ!3Zsk6ixYuNH zAE1mqt^GI&_dZ`&>I$W6B@5r4Jdk@&0BN*8hC$}~96bVTH9B3t$ilR$}G)K1{f8~D_|9;YRV z)~j%;N<0)JuqV77l+-*s1x8DSWRjzLhLZ zRD&OSE3zh=wr8~SA_Z7Bde-JOVDu|aAj+7`Z#t9@wGL9wynI?ELO>it zg+hn{2r&@EZ&q7SILKwIit#vG>)K!(J)w0yBY8~LuK8&Wei83YguGOr=b z*m2ExGIF*Zo3^tzuoY90_Odo8vz2m#a3GRcGChO~^jb@%PsK$CT}+AeESWx$ZQC<` zI-M7>&ZZG%G;i&W-6ST+tJfCDY4o~#*ywc!-FSL^%n=N*lC|yz9V#>~ryHYj`8akf zSA|O)rL)ak%i~PT<5aq7;no|(H+d#GA_JW68-P1?XQNh>mKZm zg!?T~s?v30tW?hvkb`uajT74es_deh-#-vZ^hRFmi?pQ1MA|n^5zB?U3m$3bn3>$h z9Fk4leiZW@JKjwm!G7K4RbtnXc=2eJ*!?HgcMDaItHd2%k?c3rf_5B9+3y*}ftGOD zLlf0|(&3@(kK_{ZE^ZhRce8_U=F?3}Q#V31?zoX<)Z`WBMTK?lYxJ`e^>InY%_w2zgdf!GjUPsVCn34H{|CtW%BYyMr zhX5Ur({ym(dBy1U0Nr?ceby1U4z8VUjFM|@pH}ZEQA_CkDr~mP>E<8SjXj3a{;%uC z2KTd4|Ic(|e}0+u{U7PZnz-y7-PkApiyMpU#k-V`!I&vkGa|aZZR2Z>dR2moEFp|J~;SsN~Oh~ z_A+YcsXT*fS_i_&$zKA(u=-3Bb-*E>GNZ~_P%SY|iW+(+%1OCi-A(p+PJ;y}5P)@% z4F4d~@^r-X+*4_=mf%?(R)J_vmDolTtVH`d{9TghUXp01uD>eLx)vujP8-y18!;0N zfr5$|Trh!DZgmsl@YW4LYZ{eI17ZirbRu$)kr}cOmo|h{#;osBzw4<8O@-> z?I}9ZI&UOL%{B&fb0u4Vz5}a@zaPELdbvlUa11{5+`qB`kVNBW5v?IV$i?PmgG;l~S zAn;tm<;HNl-HoB!w-Mo&rt(yOa1tF5YNc8zzt@a zA#zu;l{_gy1ABI%szi9Vj3c)r9fQqqo1O1BwPoh17!c%7L|2$UaiC=v6&n<^CSVN= zZ7?->XcJkuy{UOcvaAg=+o{16N(!SL=b!7+JfE6nB=P%5;%_SwKg}{z(dcIS6zqb| z@;B(nli_CAAni4`b{E`w8Te-TWI^kEK$yKzQ-kQA$S*pJgyq~fP4wpnS@c3DI#pT= zujxUo&*T>iDdS<$fg=~|+z}QHcSMpawX3V;N9Vu#}qtXw!7frl`o?XAQi`h742nc zMjt}UrEYb}^x28E(+izxLx~lCm+hx4QC#NKnDjYItxY1@R;}{WbOtOFk8MtYB$*K z#~J_2u-M4UlmjFBA6Z5v3nRRG%RK0!i!%<}1Y@oQxA=gG0B6HyH27h`h6-U+u?j^X zUP6{pPzp%{GzxlvZ8A=^)}K21 zMbr^3`Syf+i8WMq7Ud=2V~~e!_C-!&OZ1`x>O%(vecnizoNG*jijDUhvz!8<60~{@ 
zBS6OUe}hLiPLQE#X*2}1;Nfr|g3z7ac*vIs-=7TM54&ti+hxnX?Id@i2~LRxdklIK zEALbbVd2gWc%6lAWo;jDZr9=5uEV)q8~aC_7^w|L>+cEg<6HGcdNL6s@9W6reUA5P z;_WEleX+B3?Dt-9$~~H-2M#@9m-_^lyU#gCGe#-N_?<9PAFm$5T&R|IETpwJb)$+v z_C~764%^Y-<9()zHvQUp?mjZu&_)WJ;e9Pl`+}R!M4=?#ie%eDUdIY=Pp;f2)z+sy z;G%=i4xL5xvA5#y!KQaa+~eV7**>osiIKV5ddS=`A3H{0>eOEyJeUmcON4JthHpir zq1rZ;PZ8Nxek8GS>ykwHcFzM&7S`+mt(!?99D~o~a)M@df@UzLN^CwHCs6hrf->yu zv;E+_!}Nm;gIPIT;r9HlkPP3BUak`Yv&bt8ePqT877F0>@00Hd^3txt{|Su`amvNhcj*1%zyN zA!H}~2Hgt;-Vebk39Yx;wchUJN+k0uA_J!NX7!8IPr{kQkQW6qTjHyHR4S>w!@x9$`=f zKS9EW8Db254A$9?KnzP_xr9d(M`#_}z`!kQ>EzYD&N^=NFd=Jjkg>^@eQl(pbb@LU z>HVgP@D%$n1ZZR5eyj8C?Cx(X`}S7Q!EN|K84WSA`#a@Erbu(+D4g!_c(}ElxG+Dy z0zZTx9)n<>fr{4tlwHy5(fkLx*+Z}#Gu-!z%H&^uFZRu!kp)%Mp((DaXax%Y6ExPw z|1D3G0gaEX({Q22%;{h3vxD5)eE;fZ-Mh&OQes~J8teHNa~-NfHu%?@upHO%AEw0( z!KZeTlxS_VZUGEbS{yC9O;p|_@^}N=i-NsPoqUZd9-hYiOT(Xz@Q+nwz&FP<+(?e7=s&+?eo1>IYh~{*?jIRK0vR#F2J>k7j176;= zBlE1%8%1!#;R)+19Y!DGwJD0W(9YAK}K7{tFa~#Utv;xSJ|?xa?&#vhA2$ zbzpJ@mvNJ;d007`jT;P~Fu=wI8i{)ush2Kjgbk<6-~XGJk#eum+7Z%r*lLHFhwX>V zbs+5Lfack}A-ICN+w$?Hii#uVQepq6F%E8&?bZ?{fr4~SzOe}=;T;9o? z7l8}wM5&V@%dD%Rmtq%z3DgV@Zk4O<`@|$nCyFbj~#@p9T@r%YS{T zxsXiE-IqEuBj(V&wD2cbk_os$jdb7IO*L{SS&C}pPO@V%d}jiWb1{DzF{&rug?ZgN zq1K3mB%RpEG>KRVCX)~wd$ zMuF&p<6T;bIoHA;IdAa_9G_k=a}TOwsKaEMHC_XA<5ro z5rnk1tKpNOE=B5nBr($VvsCYbR6Ou|&^-;X zb~grnsPfqrT<bu(~e_$vw@{0cIRUI}U2A8sRgeYPhdt?iwK_wF{fe+D;f z?hdS0X7wrQ=ZLSjeOQyH?X{f>P5;#A44tutU?Q)@!1dYOzhrYmsOs(%US|<+-=4Ys z2Ris-mG|u<ue)4xum_n`1@Un!rqMflWC@1d#Fw;}k9 ze3@?^C?I1n_%nXjn&0Ar>9$6ajupMZa0~P1i z1d8p487OkhJPz@K+S?%+2XzW#ZK<=LhM-2e3GV&zW(ePbu+~W;BYlJDj1GvTrQ1mR zu6ms4m9g~ed%Qd$zc4+FpG%ECg}N+L?d~^JQ$gXBh$>Zc%*o3ON{uA&>G)g1Ef zB>#FBitT`bAv2KXbv!iWNg`f%p_lT)T`k>XB3O*M@ZU*yb%jpUBwm|wzs7N|%?#c* zmR7L4K>WnOxX3t6$J}$|mZq)@B{3Pi8I-c*EvLctfHHDyfE>v(dL9Ph+?zKTtJA={ z-Tiv9J^jk#>lyOnc~a|osw<58uN>W+2cOI`=gGKDn{%?eVbM|n-5A^cgu}lpChM(5 zv2CwY2t6^a_B&hQ2Y%x|)QP37T(%yUp(C53o*@15p74ugN;wU^BN=|t7(VGH7%kx! 
z#~@*Tc<1I{qS#JWng2+Y{Ru2Q_ETtu&56}MeymhsPk4_jjZcH0!Hr*8dte^X4L+61 zB+CQ^NRxXrl*bYMBa7Xm#ZDUw+J32@aT!iBaY0PuMFDytORiEM?LK(0CAb)Q326ClYtg4|f&Zekb}$B)S&A5Q(_yAn{V?xEoiqr}GWJX+<#(zQd=i zA2mf<_JuH^MaMwnreLVPCwzb}is-lh+7h5U%z8legIUuRbO2I~O32myO^-;cci7KO z)CH|Ku((YrHlhCd%DocSC1%Cgi+*AfpDOMkQ|=wed2o;>yD+<(tYGznKt#V@(hN8~ z2f?}!zcY?ilAJ&N)VI{TC=Zu`QJZ~62w%yXetq(BZg4$e7J;7;(s+{`2}2bQQadnO zuy13nqh&DFN5)P$^`WQoq7U&K;9$3%zWV?n1|Cm~h&!Y=VT|ga%|8I4S=BPg&#~7|k0-{o^ch&91|=$d-Mb zRQ2A&8U2+#y;;9Qk<(!w{Z?6du{@8faK$__V@d*-z@zj4nMzI@lyFo63#%4>s>C;l>leti7I z@Wqj$C&fmP^G&OtPdw_`Xv`auGoo%4vu!0iSU%TSJE84m-1j@ zz61TbKE3RMVo1;ba=_?QZ~OiZ96H`*o)A3`S!u=PZf^55<8_O|>EA@4bzqw2l1YQV zuW0ry7GJdaR5@DF6%E?M*wd;zHHid*szSUu37jT&GvPuR%Hv@h~ogJ&uG`2}^ha zfkC*74v+$Z=^6^P>n>VDft}0RFzHB)(&jK2kCKSSc>mhJCOhek82# z3(=2~GKLcMII$ zx>7%S_25BzPtAFS1ZYh>?HjDKJgw9E9y&Fh`>&V?rI#883U0rSnyU-iU8w;isz9k9 z^9{RUGc;<>UTw!@dLhQ@{ctYrBVZf0n&(cvP&*jh;*4y5j(BIpHcevC-J zI%49XE7-!1qsr1|9>!0F@E@-b%e*gj&wsNWPD0%ca|m-aBC~stffPah7^lWC%7I&i zd(+TTuKWa>CUW5|VOkrH8?en%owU(>U23NF(% z0`sEgxEZ-O=J$fleqJydMuiRs@p;0I*DHvFE04Z$MK}6=AT^YhID}Wlm2)Q-9!JeP z));RgE6utbmYVqhxh%2c#g`*3a-!o4Z%5$Gb`j`;_ipr+cB(KmXo{jky2?vmj->xE z)7Bj#2kH8c!ng5nIT$`9ruSQ)YfDmEl`Il?aiTEJNIE8eW#@O}r9G?8XnLoYTLU zxCBM-iB$ADLUwa4{SWsJo#iv9*y}j1+>>Uy-?4Oprrm))i9}BuVd5?w#UxhViyu#Q zfJY}=_TKs74$rc8vSmMq#wsmP7xT0NsB!4?-0P`L`!C>bANi|*%xf9F$SqmuM|SjL zG#a$>cJDfDiiHdmdG{AJ0&>#K9ed%+Rbxy$X= zT$?&l4|Z~$!_RZn`F2!xA;zCn^%2xLwb4Ti-$$!_d3}#Rqli|Vwe0-%`%LBQa_{kI z0NxDd@8=h{<0pcd$xPvj50_tGKC7m@_S#wQqsa4%E)VJ&uAN1ng5D6FwV}MSuC9Jo zRpTuBx^u9&y!MP(DE&Rn%oDQ~E%t|2s|aS}Ce zD#4>_j8??)q3#=t7gbb58|ic6_*!&LcvD5Rp1*m`*>dEg?~NDNH`K-ID(Y&A-yg4w zl~bl@WwbJrnUpN9t}CW5R$o>ZTUr;dHK~isYidHVx=;|R!Uw*imG7Dq6v7H3r)LT1 zUNsBSRaIXbZzxyMj>Im+sW^+%q#(R{^{Uk+p=%rJ;`NQ8jq%1^tMWwq6t=)3Nh zl8p4Z?}m+OGX&0nWXy~}&J99wakN66&K38><@|@g??IOB+km zHgP)Qche45%B@!;*DKq%5R#k~BG*ZWhN7T0v*UrQDQEb+{G6YZl#Iw{ zfWxtKHhF&kQ27mgPCY7U!-aqOz*XX^F+itC6;yl;Z`4rSNV`E$ zkJ{iizTp0i?ysS}B3yEgzGPX^GAeReZA|KAq<@n%*}WY%8p@J`;qs#8F1TTr20HTa z%LJg3k{OMKGa8FB<$L`XYSY!cF>tmm^_U~>j;NoK5|^oPId$Q*%gdqjXW57I6gsCXt!vETZ&Yt1 z?Fy$S@+#g&24#-))7<%7hDO?RhL2J5SyPpiaF#;p6RYcH*;MX?uV|nsS?MC%X!6*c z+!V8OrseY55xKyuB}>;VuBpcUIMZ=u8DZeeks!woI(hWu@q3z|$ByO7!3)EA*@?%A zIhvkX^_7qM*`kVl=8b0`|Q_Y(_ZY5!A-(u>Vox zDD|fH7sAoxSOY7g47-6;et%HDUWQBRFoDO0Q<%b)*H+35uEKR%n4HqIl*V`!yeZWo z?2==$mTM6L60D*E*HBzGLHTz+ok8`)cA5uxYHv7$nQ zShw}sDCKAI;ke|oAn%Bs+Fpkqx$=(G!}aIb->sqgns_W)$tHJ;6)1~RFU8bCHiiq` zfPjfLzFJJw>M+F%VUNC%88JqM=&Q?b&}@h24>p68OB>L=&N6w=y2J86l;?<{=SaKA zXkZO{I4bvpL3Jaxz&oVJ%VN;mNlw*+J4<$bt(B#4`dkOEC=j6>XE~b7t zlRuLIxmVUzR##ysLuw<`@DEpyTv>s+McNFb!_mj&0f!Ylhd-QrM2QU4`R1-y*DWgv zYnGF**L@>Hx`WXr;GFp~QHUESXUZ%UgAFf*a=EXE59iA8;r#i=HTilcHjL?;UQhX% z{wX5_#x@(U`hs-2JwK>He32%Yzg{yTtE9wa zz&x3!qISo~P_&_;u3=Je0&@$3F&|wWjM+40%(!DFOx%h`OVfbUe>zU2KZ}l?a@*L& zlku`JACz{XTx09joat9d`j_*hZO3OFt(_jZW{y={;J)FsZp`?E!4}bbB3ms*2&XIq!q|+CJbEV%c>3_B9 zM!ubrUS#cO()UXG_icV7{{cy-udL=Oe;V<{QEJl-{|hC3yiGU!>4j7}@}+N<^iSo< zf48I;9i|XT7Dzn-b17xkn}V1@Snz>5(L|9y5WDJq<`0@Tm2<{Y@YPZl73>I z^t&bf)I8}sCH+C0Zsgl5>9hxC+CE0U1Cma!CZy9%{%LrBoQ{j}I$N&v3nl$NB2xdR zYaG+?|0+r6H`X-Wl)qWhkF({Q^t%s{zEjeFmZ$ygmGqe9Z}dAL>E(I+XBz2^gMXN$ z@;iw2yHL_ULRgOUt0euKwtSO+v!s7CPyV|l{ro)XJ0<<M!Y2bLbyj zx_EI(sPNLu%8Ekg7SAa@J9PH!*>mU2Is4pD;cARUBjquN&ph{RtTh)m-ncPVehtpC z26x_|R}Imc^5R%@Q!FU1#Pf!r_?pJXptzyV-EeAj!_`#{wD?h6QMYj;R?~uFECNT1 zQTj}5l3kHr#T4Tm!6}41WCrcOl*K~1C|#$BJBw?i!BcSMW0W}MHX*shosY(e%9$we zR5_n6xHLIIgJ7#yt|s_p(FZ1o=aJ^8KAPl`kcTUGX!*k`sNgEU(k;M6uKaE6{6R3) zGpwKg0_68~BHmMa>2DA;OCs5iSZRJ0D*}wl_m7Sn98&Bh__Xgwx{pzoA4z^M`v}^% 
zfxx%D&0p>@@XMlm-}aQH{$nm9?U_Rdy$4!=|Gxf50n1hX`dg5IV~3aMm+z=X{w2Ec z!7KEp+K6M^2rD>}aXBR+`9{_-=`q&u;jttzs#n7Gp8Qjfae)LT1#qVNCHkd;eOb=L+-z$HB|B|a_hb2#@6?h^8QPMpTc`Xi5V=~~L9Q7q?IX)_RC z-I)i!D-ZsqJotlo@ZEWE`tD~gd7jUM{|xwXuuq@(6OX-t{ioo?QpG%`2KHyc4|??U zzA5oJpBgj{9%JK7-~7%c&)hsXeMmMJ{Z)DJiahvrz)7CJQbWeUV|tuFD)|267!G(W zkIRn>e$UAc=P^7kX)H*5{zM%D4j#MX+#&d}r!pMym>rk*34ZiD9nNEQTz*UNbKm7~ z9;4%Om*5BA?QkBO<8rs)$4_@SkI8ZQjNlbB9L{5HT)rUqwWnv}zZU$uGaSxiZ(Pz` zfb=?0=x`o$<8l&$7U4&q>2MxvXH%UO|u{F}q5d2YiNgOT!QXM7!+9)?%TEb@J`b*ffXC3l z?-KlX^iy*jJa)$U%Yt7{9T^TDGvoYq!GBP~up2AmlIAA4XL~u`w=b?@Pklgr3K|z@vhftakJiAL+P3@COB#Y6xg{O?-YQIFD^Xe}~}9 z)^hfM$F#Wog5dKmcQ}t_>5?C(R`6+c6wd}O;rBZ5R1f3Qx*Pcwq}9=?C?2C#MDYkw zUK+eCdc!Jy6K_=&FSB8lq_%D;UKPo_LWyNAT4$jb{wRSKWoYq9v7EdX(;J~dW2_SV zf~xukyZ}%YTzxfjzZx%;xc3J6z0u1s4qjE|gb1qK)BNi-OI1TO$|^0ctF4Vz;7vg+ zOUw15Ml9ksSOm%}gI?vN;Rr&T2=SL+wk%wGeU|Wx=*7u3Sk=a>CwA#5*oZZ;x{9Dt z^dQz){)P?HBCW!UNn{XLEWI(6z6-lILMYQ}dUJy=O8Kn}y1Ihik6>&Ky}Urs+GtG; zUo7Q!3kX_6uM5z{rO{Z9hvJm7DE(lZ?iSPIU%FU{N4tEn#68_5Y_)svN|)ewh5sZJ z{!&{Z+1KPqXZ}zi7R9PK>ZGp06$M(maLr`=RM1`F^)D6#PuVf?k;2{M4wP6KuV-v2 zD%^oWG2&T<;OQp}*VfcsQ(nUkm+@~Q;#GdSUV+ubjIr)!xmuEx_`^;@oW1#WW9H=yTsG@aL@MJ^OSN$x)_XM{l> zFCU=y@^Rd2aEfb+e@$@ZGh6N-G4wPvPu-4`~vA$XjnsOv;{34Uy(Cm$K${9 z`Hi6`y*A-L9gBz%2k{|%2i@TKf`|X7gM+sq&e1{1R<9ETSH0dM<-NnupMX3n&s7Gu zeCiBNV{)ax-r$zLRd6l$SSj}thTfL@w85zkmH(d&PICgq3uT;1`qB_g@y{EahG2?6 zU~pSsKNMW$)bZML9v%|;e`Wa4JWToj!Qj>oV`ZwM{3F6=g5V^-wZkz6w{k8wxaI!= zk3X%=)A3J+-tw<7xaI%2!D($y^?JtOmR{G6e197fdeyf}Md68-GqPxRrCY z!7cxCkH5zMDnoDiUuSU3|51b6`2RhFTl$|G+{*Kk;Ht02{~<$fI1f|K2>{pT9o%Cpqtqw)WJhTh6kYH+LXdXJCB&!~s1oVz_-b|cM9&eqe6pQ`R93jRQ`)RT;u<51Xp=9{ttP0xwOYuJU-gq-%6E) z! z^NGGT{#OdFdTIQxHS{+AcM7ihYW&~f;Tr!R_V{c3|GwdG#|e83ZuNT6C^x~z+; zJR1Mc@^H2D*@COSYUfJ@_w8J2a4XMxk55YM{53;wjhVR zwO`oe;YBjeyGd|g-))A!wg2Z0PM6y6Kj871CHxC%(ujlXX7!q9aLZ?-!Kq4>|7O8e z&WP~eV(4u=yv^X2|KkR?{D0)}-z5BhZ0IfjzX+~&en99a5D^Z=)z9DU;SUM@S`Y6Q zyxha}xx{7K@aZ`{1p%XuHdf;uJ%+vKS~~4`F?(!!L2=K2(Enkg@3z;kCE}~ zS3LYg!FxP>hTs!qVMOhDuHXe8{yxD^6kO%e@y~3*eZ4L)xYcWg$4AHaPYUkK^F0q& zdHOs)D$kJMzC5oQ+{!cI?ar@MFO}y5f~!2OVux}MzgzHYJw7VWErR>l{i;xKpZ|FVxBS<5e3buV9L>kDUl9)vWIk5y z;o6UU#KW~8`J~{!eg4JZR-Ug4u6|M|<$l-DQ&Z6RKamFaID9)yHMr&9EV$}BPx!Za z_!WYGQgD?=>-|2%-}==z4Q|VQ(&MvU_@7CcbI4y}aLea2dGPNT-1cAJ6I|tQlX9Oi z^c3>do_h>#%Y6?yB@VUQgTm*%9{#A{a|G9N)vs0v?)yWT!EL!wk59Mof6CBXeZTL~ z?-%-?7(Rz8w&|7`~%cIwN88-B`UdF#G z;Dh9`<*qb1g#@j~4;kFje=`sMOdkAIgIoE>z1zv7c3UrYo+voUY3pmU!L2->5M0~Q zCxy?aJ^W6=w+pW2Y8?Ki;ZIFg?eHCg+j76}@#zr$C%(t&H5Mmp&lv`{d_I;3?-X41 zdPex~@bEo?KP@o=q| zeI7oUGU9mE!`~+OaWh;x+287Wiova%?-g9_HdXk<4ZW?$R)bqUKg)w36kPSy{(Zvf zPEN%ym2xKwuJWiIrVH-tHPhf$o{Ky_rNaM4LvQs;7~JytWgdLo8QJoyUPpWQO;YaL z1o!nS6x`Qqj=`-wi#$GU!oSJTTfN#1Zu$Hs4}NT6wqCj}e3FMhBjrvL+}EpEa9^*v z2DkED>hT#6{>_5Z?F9UPzpRtCd3c%NpY-^I1n&^sm*+l%TY0|i@u?R+#}+yJ5Fcyj z(+qC;#PZ-@65QA8s~-M{l=}_A)y^6>o-+Jxd;fvKtvq`@KE1-f_{`jTEiky{^P=D? 
zkNW?hhwHp=*zlpHLG52Ay*E>??H}JExboNM0;d^z+dsa?;8xB~!H-5UI94D7==jjHw{O=H4?YvO<|BHuTE%@gI zSH0BFA2Ix`ox2Tg%kA^{Y!dz_ots2-#6!e_mQ-z9jJ;99Qw zXS3jbyGt0{miwSSG~UP@!uf) z`wYGHlM70moN9-yLchqvzajXgf~)*$hjPJvf2c9ImGfgBpGSp%v!SpY{ z{k(^l3x2QQs+T^8e%SCo)yVUGgIjri;qj>#{xu6+`MzHF8T=%}|FJyyQwF!?UUzYZ zKe?pZZTZ3sZuz(6!5_$jKWXq&f(-xX3~uE)X%UEUsQq=l>ogDloY-f&;A($u@8=8d z`_;t;x8<(#_;d;XPaAqG{}&Bz`Rwud>=!;S7<#G-weu?mxA7;qgw2qC?<+*X2FF_j zC%tU^nJu_)w|O4EPWUVkThV`OuQB|s-L5mZ)%Rl_ zpMK##Ah^m?c%suY<>6-u{$C!SiGl}9T)we5S$U2!xRvJ}f~(!;37?M&uJT+h_$?k@ zA$Xg|N9Fmf;J!RxGPsrJTOOZH!v9Z#t2`PX{^sGGLO(w2?4Ww7JSPe6%kxfyTX|-A ze7b~xgW$eAANBB;gudD1qw;)Oa9^Ig3~uH5s>dgwAdTbXrS9y@^KK76Pv{E;_w7?6 zxG&FQgIjslczhzlzfW*qo_-I%LFj+s@lpHi7u=WU&jz>h9CfL)gX+~R{8tFB^5{Cl zS`Y6O`VV@1RGu2aeR*y&xRvLV9-ke;|9gU~Ji5;CoQLZ=L%+vI*aW{5+?VGM2DkEz zi)7oUUl@HzaN=Xf1CI*s%lV{-9}qsh9)Gple;EEYF8$WvHlDxg^Le|I`_G2n#`C`! z-0B-%=JNS^Z4=zr>vJBySjzp9;ObYZ*F%QC)vMd!R=| zz3K&5y|xPfO&Hro$Gp#-CyD$zzdl}YU;fh!Zsnip@tG?8&o=Z{{<#LX^8dTwM~ghVPW3|% z*Y|>cEV#J8nGF;Fiz5f~)-NrM~X>@D9PhDY)8M$8SG0{H^@Y z8{C%rlE-I<@LzmcMt)2GPX?zUzmETF4Q}aQGWaEizIs)LzqQYe2Dg0v-Qc#|{RX%E zH+92k zYq?rqUo-q|eLZY&TkdX;kJi_Kp||?J?9sm{e)X!MxB8yAF1OuI698 z;8y;SW6$eFR5yAolndfd_=$4AUe1ckG%9HJ@FBVH@bJEI4(Rr9{f^auhwFE*f-x>Z z%hm5&>H7vyIWdi1*9yWYd~J5$XbuHTvJ@NoUkRJVugccunBT)#8LMuZ9~r+#ND zBt}pfh*Gu7$(K_4`oW9h4zx5K2x4dA@0i$h-39ezkb1kI%0}oz6L3K#VP9ppQS=({ znV4kVu~%^I<5WtD{-m+e$En|A3>0op?W8+J5>Gi=Yz2D}Wr`$Tc#O_n^oNsr!mrZx z&?!A!29=#D1Bp(US0gP?PrNcCQ3M++_hMt^ZftK^4=_R&ZFB8gooDkkyJ0H3z?c($Nu{SyU6 z>%LJ?RC-TAQRGtvMGLnicTriXEB?y$Q_$186Ss5>u7?jqk`J+X$z3FaXxlmw$&r>_ zbPD2ntfY!DtvHaC-2Nw&+u zJ|_^QJ2?0Zf))%o6?O>W9#(DP3v5)*=YofW3yb2v8;Q2H&Z5%VTlYh-o?tfdmz#y- z-Q6}yinNDAk;EkvB{9;zt(5X5N~bjMA`UBJlikIY1+DiGlb+VufN>SHuo;8+yev(& zqnn7dw=?hLHdZ^YgSkU@eSZa6cMPxIT zXl@q69fven8)qL{p!Pa1EC1|63gq&Oxe6X;Zf#=+|AvfC%rynAUx1iZJ*^A4ZdK6i znjJo*pj8QGkgsMp9a2(ZprSs)?5a?+tJAVBPy69Q$Vx|UY3XxYqgOQCh*;#4w)2qE z3WJOv5Jxf#l)`D6m?&?I+_)F8>>)^(-m zh}Nfj(nMKTV&#MAjQWWTQ4c$+ z@RsntF(`I^_`!nP>!{L_;QAJDxE@Fxf_uEJdco3u|%yT8s>99-qC8^rpIt6)pf{?k<&XbcO z?PXKYRZ{0kl&14cOGkQ;AP7ZFlHA7PBp)J$(NaV?7m92yZjb5I2vN7}qEir?iz})` z*B-e#ldVi4?UAx&jVlO%36mV~5VqU^hfnE-xDX97g&{56r>sA*a?gqcHNZIjH>Jds zIT~Ne8*ieebVJ=>a5pI}qxxjip48wUc(j#nWwaFIHv!H238b|&?Fr(i!;K*T8L^Fe zpX5peg0caKwO^VA+SU+e7kkFUF}qOOiR>S_Xk9y;kq#a-7+;wpo^Tys6&lVa!rkE8 zowgEWhSugCn+!_I(LZ=UDNiji9l%gV(|`?Nr=`=y&pBCw-%xEPwUc?V z!j6FilhGN>%rKCEK^7whE_9?)b5J25Vu5k)Ppeo86)``-+y;s}5;)P@*G3CVaT ztCk4g-_mq{5IaX&RI==TuN9f>J*^a@iRT~=J;w~129d|@}otk8Oeorgs!wA{Z=}<$&!vF%jp!-b@0i0Orq7MB*I%E zJO0P}onk%V0TM6L{!l&HG1(+I)(n`?;cA3|p?9=ENukbCR9Y=^eVKGhhWD@wkIyZw z>pxvjn=P$N*~O11PowEyp*>+0FV$;x^WcXF|O50(J#FLRHemx$#pvxIQDVGUTEm)#LhWX(=n|okK z(yynr6n|+?5aa}t)^)graTBd;ak0B~J^rFT;>~ExL>X!4fdvbu4Z7s@HRw!p8^Ro+ zsh4>k%53-Sb_mG1>o&g5Pn!Y+Ben-93CWk@rC#t2|Euu@d-sX<%H`PN{qmI%0z1T` zTDWws2^hR`Qm}Z%qBUzu7p;xJF16QJH`LWq{e`YCZ>Xla38mB7p4dPEo7v7>LIM}F zAjS3gEc`{mjK&#_!HQ*Y#@U8T3;B1ig8Nw1^^pCf&*q|d@f-S+ga3t+eovnK zS4sM1)VF&1|6ki1?#t?EWJ??vjb$D5K7x-CLG!yE**q8C_%|VB)`<@0J+SEVbo?hA zBd@F@<&~?6epzQBfq44)^yU@-aW6sTSK0-*$d!N7+Z_e1 zWZJ{ePd}aWbt3vMQVa*pWayxGDyTVW{;34wh~U3h1mpXKpqTG6e|%bcCCbN$`ZV*r zE|YsCExq3`!uBSAxy_7gzbv}M_d&bQJ9gXYskXOlr_X)1|+y?;r zfZ#7kh9~7*EBIZ)ietW{{XO=KYXL8+SO1`eom}{*^5FO8!SBz5@5+PIVpOhjf0PIR zO&*-yy`jCFjuYqRz7W{3;6EmZ#zFb%m<;<8{nx0WDMc_UUm3?ft-*({-{>xApuUy{gUc}XjaCLc^u<0$ zD|v*!76besiv*)%>_8O@*Tx!dTvi*az6Be&XpubmS&k81nKcHoc8jcIJ4k%kE0e-nS~})<|x&zO0gDbA#~otN>eN**&l< zuA78O4%*%;YvV2%nB!d%?6OYVgUS9{Ifv+!9>wNcy02EPY^(()wDA;9vAXL5e>5HN zW?4L6u;fzmY&~sKpM$o?eICFS388{{@>(G 
z^e5rJ@_*RiB>#H3=Xo-&iI26z(~?enXvm{{bnio-{w%ahI()k=FgOieln<>8)8X@3 zVd!nS>kMw~qkHL5xz?W7=Ao}QxV7hI!PTA-mM#cxGxXM;yA8dyo9=n(+u`SikF~=~ zdH4(&dTWO&VvK{j6T1Ao+<#xrbfUJSb}kr)8ytk&cZ@HV#zTDQ@_PKIqe^h1xAv(M zTL9q$&|CX-8Qk*e@%VHKpC1@{+kQ`xe$bcaOuc{DrW#}!R=LJ{09ZMN; zOqa87w=IGz{}&y3aGRmG_Iy_Qcc0I13~uePPjKa*5dMEM^wyrI$+*I|!%V@I&yesr z+t6D(e97Qe9{r}B^3fsY6NcW(bMmpSAf+!6f35KF<$`bUaBa7fWE?_z(HL3%_I$yW zzqaT3hCXEIe`x5bJubw5I-WE1HlF|1;O_>VmitG8Tl@Ug@F!Po6TK!75e|~y+UFR- zNq#HO+YG&J*9#0i@qbA8FE;e=0j_eE8JsQ^w|jOFpKjq(ZRo8(=w2SaoQ;Ow%DGK& za#P|zDEzt8#XZ?!I{vFX)2J{UT8{^W{&c~Keg^(4z1@R@_>Un19CHo5wZkO_C;B>} zf5G6i7NPuKG58qg;h4z2!gC;FeEC9{gd07lN0{x!d5je(hcr zr0*2bx8KlPKm57ie*OO1(A)YQOPwnYVq1j&D$g2&TY0V&T^`ELflRPNV%xXzQCJzVF>9UiXpz-|xMdEJ1A>u+#0 bzN(x$KMYCxP+aGe3q4%tf$Ke7=XL)dL#AO} diff --git a/hdfs/setup.sh b/hdfs/setup.sh new file mode 100644 index 000000000..ac69b525d --- /dev/null +++ b/hdfs/setup.sh @@ -0,0 +1,7 @@ +export USE_HDFS=1 +export LD_LIBRARY_PATH=$JAVA_HOME/jre/lib/amd64/server:$JAVA_HOME/jre/lib/amd64:/usr/lib/hadoop/lib/native + +export CLASSPATH= +for f in `find /usr/lib/hadoop-hdfs | grep jar`; do export CLASSPATH=$CLASSPATH:$f; done +for f in `find /usr/lib/hadoop | grep jar`; do export CLASSPATH=$CLASSPATH:$f; done +for f in `find /usr/lib/hadoop/client | grep jar`; do export CLASSPATH=$CLASSPATH:$f; done diff --git a/util/env_hdfs.cc b/util/env_hdfs.cc index eb2b12cba..09ee03438 100644 --- a/util/env_hdfs.cc +++ b/util/env_hdfs.cc @@ -15,11 +15,11 @@ #include #include "rocksdb/env.h" #include "rocksdb/status.h" -#include "hdfs/hdfs.h" #include "hdfs/env_hdfs.h" #define HDFS_EXISTS 0 -#define HDFS_DOESNT_EXIST 1 +#define HDFS_DOESNT_EXIST -1 +#define HDFS_SUCCESS 0 // // This file defines an HDFS environment for rocksdb. It uses the libhdfs @@ -223,7 +223,7 @@ class HdfsWritableFile: public WritableFile { if (hdfsFlush(fileSys_, hfile_) == -1) { return IOError(filename_, errno); } - if (hdfsSync(fileSys_, hfile_) == -1) { + if (hdfsHSync(fileSys_, hfile_) == -1) { return IOError(filename_, errno); } Log(mylog, "[hdfs] HdfsWritableFile Synced %s\n", filename_.c_str()); @@ -398,12 +398,40 @@ Status HdfsEnv::NewRandomRWFile(const std::string& fname, return Status::NotSupported("NewRandomRWFile not supported on HdfsEnv"); } +class HdfsDirectory : public Directory { + public: + explicit HdfsDirectory(int fd) : fd_(fd) {} + ~HdfsDirectory() { + //close(fd_); + } + + virtual Status Fsync() { + //if (fsync(fd_) == -1) { + // return IOError("directory", errno); + // } + return Status::OK(); + } + + private: + int fd_; +}; + Status HdfsEnv::NewDirectory(const std::string& name, unique_ptr* result) { - return Status::NotSupported("NewDirectory not supported on HdfsEnv"); + + int value = hdfsCreateDirectory(fileSys_, name.c_str()); + result->reset(new HdfsDirectory(0)); + switch (value) { + case HDFS_SUCCESS: // directory created + return Status::OK(); + default: + Log(mylog, "directory already exists "); + return Status::OK(); + } } bool HdfsEnv::FileExists(const std::string& fname) { + int value = hdfsExists(fileSys_, fname.c_str()); switch (value) { case HDFS_EXISTS: @@ -412,8 +440,8 @@ bool HdfsEnv::FileExists(const std::string& fname) { return false; default: // anything else should be an error Log(mylog, "FileExists hdfsExists call failed"); - throw HdfsFatalException("hdfsExists call failed with error " + - std::to_string(value) + ".\n"); + throw HdfsFatalException("1. 
hdfsExists call failed with error " + + std::to_string(value) + " on path " + fname + ".\n"); } } @@ -449,13 +477,13 @@ Status HdfsEnv::GetChildren(const std::string& path, default: // anything else should be an error Log(mylog, "GetChildren hdfsExists call failed"); throw HdfsFatalException("hdfsExists call failed with error " + - std::to_string(value) + ".\n"); + std::to_string(value) + " on path " + path.c_str() + ".\n"); } return Status::OK(); } Status HdfsEnv::DeleteFile(const std::string& fname) { - if (hdfsDelete(fileSys_, fname.c_str()) == 0) { + if (hdfsDelete(fileSys_, fname.c_str(),1) == 0) { return Status::OK(); } return IOError(fname, errno); @@ -478,7 +506,7 @@ Status HdfsEnv::CreateDirIfMissing(const std::string& name) { return CreateDir(name); default: // anything else should be an error Log(mylog, "CreateDirIfMissing hdfsExists call failed"); - throw HdfsFatalException("hdfsExists call failed with error " + + throw HdfsFatalException("3. hdfsExists call failed with error " + std::to_string(value) + ".\n"); } }; @@ -514,7 +542,7 @@ Status HdfsEnv::GetFileModificationTime(const std::string& fname, // target already exists. So, we delete the target before attemting the // rename. Status HdfsEnv::RenameFile(const std::string& src, const std::string& target) { - hdfsDelete(fileSys_, target.c_str()); + hdfsDelete(fileSys_, target.c_str(), 1); if (hdfsRename(fileSys_, src.c_str(), target.c_str()) == 0) { return Status::OK(); } From d788bb8f719c16cfcb789d8656555315783b9f56 Mon Sep 17 00:00:00 2001 From: Mike Orr Date: Wed, 21 May 2014 07:50:37 -0400 Subject: [PATCH 2/3] - hdfs cleanup; fix to NewDirectory to comply with definition in env.h - fix compile error with env_test; static casts added --- util/env_hdfs.cc | 37 ++++++++++++++++--------------------- util/env_test.cc | 8 ++++---- 2 files changed, 20 insertions(+), 25 deletions(-) diff --git a/util/env_hdfs.cc b/util/env_hdfs.cc index 09ee03438..6b6d56c00 100644 --- a/util/env_hdfs.cc +++ b/util/env_hdfs.cc @@ -401,16 +401,9 @@ Status HdfsEnv::NewRandomRWFile(const std::string& fname, class HdfsDirectory : public Directory { public: explicit HdfsDirectory(int fd) : fd_(fd) {} - ~HdfsDirectory() { - //close(fd_); - } + ~HdfsDirectory() {} - virtual Status Fsync() { - //if (fsync(fd_) == -1) { - // return IOError("directory", errno); - // } - return Status::OK(); - } + virtual Status Fsync() { return Status::OK(); } private: int fd_; @@ -418,20 +411,21 @@ class HdfsDirectory : public Directory { Status HdfsEnv::NewDirectory(const std::string& name, unique_ptr* result) { - - int value = hdfsCreateDirectory(fileSys_, name.c_str()); - result->reset(new HdfsDirectory(0)); + int value = hdfsExists(fileSys_, name.c_str()); switch (value) { - case HDFS_SUCCESS: // directory created - return Status::OK(); - default: - Log(mylog, "directory already exists "); + case HDFS_EXISTS: + result->reset(new HdfsDirectory(0)); return Status::OK(); + default: // fail if the directory doesn't exist + Log(mylog, "NewDirectory hdfsExists call failed"); + throw HdfsFatalException("hdfsExists call failed with error " + + std::to_string(value) + " on path " + name + + ".\n"); } } bool HdfsEnv::FileExists(const std::string& fname) { - + int value = hdfsExists(fileSys_, fname.c_str()); switch (value) { case HDFS_EXISTS: @@ -440,8 +434,9 @@ bool HdfsEnv::FileExists(const std::string& fname) { return false; default: // anything else should be an error Log(mylog, "FileExists hdfsExists call failed"); - throw HdfsFatalException("1. 
hdfsExists call failed with error " + - std::to_string(value) + " on path " + fname + ".\n"); + throw HdfsFatalException("hdfsExists call failed with error " + + std::to_string(value) + " on path " + fname + + ".\n"); } } @@ -477,13 +472,13 @@ Status HdfsEnv::GetChildren(const std::string& path, default: // anything else should be an error Log(mylog, "GetChildren hdfsExists call failed"); throw HdfsFatalException("hdfsExists call failed with error " + - std::to_string(value) + " on path " + path.c_str() + ".\n"); + std::to_string(value) + ".\n"); } return Status::OK(); } Status HdfsEnv::DeleteFile(const std::string& fname) { - if (hdfsDelete(fileSys_, fname.c_str(),1) == 0) { + if (hdfsDelete(fileSys_, fname.c_str(), 1) == 0) { return Status::OK(); } return IOError(fname, errno); diff --git a/util/env_test.cc b/util/env_test.cc index 2abce6f3a..c0d00ce94 100644 --- a/util/env_test.cc +++ b/util/env_test.cc @@ -285,7 +285,7 @@ TEST(EnvPosixTest, DecreaseNumBgThreads) { // Increase to 5 threads. Task 0 and 2 running. env_->SetBackgroundThreads(5, Env::Priority::HIGH); Env::Default()->SleepForMicroseconds(kDelayMicros); - ASSERT_EQ(0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH)); + ASSERT_EQ((unsigned int)0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH)); ASSERT_TRUE(tasks[0].IsSleeping()); ASSERT_TRUE(tasks[2].IsSleeping()); @@ -330,7 +330,7 @@ TEST(EnvPosixTest, DecreaseNumBgThreads) { tasks[4].WakeUp(); Env::Default()->SleepForMicroseconds(kDelayMicros); - ASSERT_EQ(0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH)); + ASSERT_EQ((unsigned int)0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH)); for (size_t i = 5; i < 8; i++) { ASSERT_TRUE(tasks[i].IsSleeping()); } @@ -360,13 +360,13 @@ TEST(EnvPosixTest, DecreaseNumBgThreads) { env_->Schedule(&SleepingBackgroundTask::DoSleepTask, &tasks[9], Env::Priority::HIGH); Env::Default()->SleepForMicroseconds(kDelayMicros); - ASSERT_GT(env_->GetThreadPoolQueueLen(Env::Priority::HIGH), 0); + ASSERT_GT(env_->GetThreadPoolQueueLen(Env::Priority::HIGH), (unsigned int)0); ASSERT_TRUE(!tasks[8].IsSleeping() || !tasks[9].IsSleeping()); // Increase to 4 threads. Task 5, 8, 9 running. env_->SetBackgroundThreads(4, Env::Priority::HIGH); Env::Default()->SleepForMicroseconds(kDelayMicros); - ASSERT_EQ(0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH)); + ASSERT_EQ((unsigned int)0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH)); ASSERT_TRUE(tasks[8].IsSleeping()); ASSERT_TRUE(tasks[9].IsSleeping()); From 591f71285c1349fc6b6d2a113b360d0d2da46cf6 Mon Sep 17 00:00:00 2001 From: Mike Orr Date: Wed, 21 May 2014 07:54:22 -0400 Subject: [PATCH 3/3] cleanup exception text --- util/env_hdfs.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/env_hdfs.cc b/util/env_hdfs.cc index 6b6d56c00..1618e5468 100644 --- a/util/env_hdfs.cc +++ b/util/env_hdfs.cc @@ -501,7 +501,7 @@ Status HdfsEnv::CreateDirIfMissing(const std::string& name) { return CreateDir(name); default: // anything else should be an error Log(mylog, "CreateDirIfMissing hdfsExists call failed"); - throw HdfsFatalException("3. hdfsExists call failed with error " + + throw HdfsFatalException("hdfsExists call failed with error " + std::to_string(value) + ".\n"); } };
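
The series stops shipping a private copy of hdfs.h/libhdfs.a and compiles against the system libhdfs from CDH 4.4, which also moves util/env_hdfs.cc onto the Hadoop 2.x API surface: hdfsHSync() replaces hdfsSync(), hdfsDelete() takes a recursive flag, and hdfsExists() reports non-existence as -1 rather than 1. The sketch below exercises those same calls directly, outside of rocksdb, as a quick sanity check of the environment. It is illustrative only: the "default" namenode, the /tmp/rocksdb-hdfs-check paths, and the compile line are assumptions, not part of the patches.

```cpp
// Minimal libhdfs check mirroring the calls HdfsEnv now makes.
// Assumed build (libhdfs0-dev installed, env from hdfs/setup.sh exported):
//   g++ -std=c++11 hdfs_check.cc -lhdfs -o hdfs_check
#include <fcntl.h>

#include <cstdio>
#include <cstring>

#include <hdfs.h>

int main() {
  // "default"/0 picks the namenode from the Hadoop config on the CLASSPATH.
  hdfsFS fs = hdfsConnect("default", 0);
  if (fs == nullptr) {
    std::fprintf(stderr, "hdfsConnect failed\n");
    return 1;
  }

  const char* dir = "/tmp/rocksdb-hdfs-check";  // illustrative path
  // hdfsExists returns 0 when the path exists; the CDH4 library reports
  // non-existence as -1, which is why the patch redefines HDFS_DOESNT_EXIST.
  if (hdfsExists(fs, dir) != 0 && hdfsCreateDirectory(fs, dir) != 0) {
    std::fprintf(stderr, "hdfsCreateDirectory failed\n");
    return 1;
  }

  const char* path = "/tmp/rocksdb-hdfs-check/probe";
  hdfsFile file = hdfsOpenFile(fs, path, O_WRONLY, 0, 0, 0);
  if (file == nullptr) {
    std::fprintf(stderr, "hdfsOpenFile failed\n");
    return 1;
  }
  const char* msg = "hello hdfs\n";
  hdfsWrite(fs, file, msg, static_cast<tSize>(std::strlen(msg)));
  hdfsFlush(fs, file);
  hdfsHSync(fs, file);   // Hadoop 2.x name for the old hdfsSync
  hdfsCloseFile(fs, file);

  // Hadoop 2.x hdfsDelete takes a third "recursive" argument, as used by
  // HdfsEnv::DeleteFile and HdfsEnv::RenameFile in this series.
  hdfsDelete(fs, path, 1);
  hdfsDisconnect(fs);
  return 0;
}
```

The 0 / -1 return convention shown above is exactly what HdfsEnv::FileExists, NewDirectory, and CreateDirIfMissing switch on after these patches.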
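
For completeness, here is a hedged sketch of opening a rocksdb database on HDFS with the resulting build. It assumes rocksdb was compiled with USE_HDFS=1, that the CLASSPATH and LD_LIBRARY_PATH from hdfs/setup.sh are exported at run time, and that HdfsEnv is constructed from an hdfs:// URI as declared in hdfs/env_hdfs.h (that constructor is not part of this series); the URI and database path are placeholders.

```cpp
// A sketch only: assumes a rocksdb build with USE_HDFS=1 (so -DUSE_HDFS and
// -lhdfs are on the compile/link line, as build_detect_platform sets up) and
// the runtime environment from hdfs/setup.sh. The hdfs:// URI and the
// database path below are placeholders.
#include <cassert>
#include <string>

#include "hdfs/env_hdfs.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  // HdfsEnv routes all of rocksdb's file I/O through libhdfs.
  rocksdb::HdfsEnv* hdfs_env = new rocksdb::HdfsEnv("hdfs://localhost:8020/");

  rocksdb::Options options;
  options.env = hdfs_env;
  options.create_if_missing = true;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/rocksdb/testdb", &db);
  assert(s.ok());

  s = db->Put(rocksdb::WriteOptions(), "key", "value");
  assert(s.ok());

  std::string value;
  s = db->Get(rocksdb::ReadOptions(), "key", &value);
  assert(s.ok() && value == "value");

  delete db;
  delete hdfs_env;
  return 0;
}
```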